Example #1
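All of these examples use Caffe's Python NetSpec API. A minimal common preamble they assume (the helper functions such as conv_bn_relu_layer or factorization_conv_bn_scale_relu come from each example's own project and are not shown here):

import caffe
from caffe import layers as L, params as P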
        def generate_caffe_prototxt(self, caffe_net, layer):
            layer = generate_caffe_prototxt(self.features, caffe_net, layer)
            layer2 = generate_caffe_prototxt(self.features2, caffe_net, layer)
            layer3 = generate_caffe_prototxt(self.features1to2, caffe_net,
                                             layer)

            layer_features = L.Eltwise(layer2, layer3, operation=P.Eltwise.SUM)
            caffe_net[self.op_name + '/features'] = layer_features

            layer = generate_caffe_prototxt(self.stage1, caffe_net,
                                            layer_features)

            layer_combine1 = L.Concat(layer_features, layer)
            caffe_net[self.op_name + '/layer_combine1'] = layer_combine1

            layer2 = generate_caffe_prototxt(self.stage2, caffe_net,
                                             layer_combine1)

            layer_combine2 = L.Concat(layer_features, layer2)
            caffe_net[self.op_name + '/layer_combine2'] = layer_combine2

            layer3 = generate_caffe_prototxt(self.stage3, caffe_net,
                                             layer_combine2)

            layer_box = generate_caffe_prototxt(self.box_regression, caffe_net,
                                                layer_features)

            return layer3, layer_box
Example #2
def cnn_module(bottom, num_out):

    conv1 = conv_bn_relu_layer(bottom, 64)
    conv2 = conv_bn_relu_layer(conv1, 64)
    pool1 = L.Pooling(conv2,
                      pooling_param=dict(kernel_size=3,
                                         stride=2,
                                         pad=1,
                                         pool=P.Pooling.MAX))

    conv3 = conv_bn_relu_layer(pool1, 64)
    conv4 = conv_bn_relu_layer(conv3, 64)
    pool2 = L.Pooling(conv4,
                      pooling_param=dict(kernel_size=3,
                                         stride=2,
                                         pad=1,
                                         pool=P.Pooling.MAX))

    conv5 = conv_bn_relu_layer(pool2, 64)
    conv6 = conv_bn_relu_layer(conv5, 64)

    conv6_upsample = L.Interp(conv6, interp_param=dict(zoom_factor=4))
    conv6_upsample_crop = L.Crop(conv6_upsample, conv2)

    conv4_upsample = L.Interp(conv4, interp_param=dict(zoom_factor=2))
    conv4_upsample_crop = L.Crop(conv4_upsample, conv2)

    conv_concat = L.Concat(bottom, conv2, conv4_upsample_crop,
                           conv6_upsample_crop)

    conv7 = conv_relu_layer(conv_concat, num_out)

    conv_comb = L.Concat(bottom, conv7)

    return conv_comb
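A minimal sketch of how a module like this is serialized to a prototxt, assuming the conv_bn_relu_layer and conv_relu_layer helpers above are available; the input shape here is hypothetical:

n = caffe.NetSpec()
n.data = L.Input(shape=dict(dim=[1, 3, 64, 64]))  # hypothetical input blob
n.out = cnn_module(n.data, num_out=32)
with open('cnn_module.prototxt', 'w') as f:
    f.write(str(n.to_proto()))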
Example #3
def e2e_conv(bottom, num_output, kernel_h, kernel_w,
             weight_filler=dict(type="xavier"),
             bias_filler=dict(type="constant", value=0),
             param=[dict(lr_mult=1, decay_mult=1),  # weight learning rate parameters
                    dict(lr_mult=2, decay_mult=0)]  # bias learning rate parameters
             ):
    """Implementation of the e2e filter."""

    # kernel_h x 1 convolution.
    conv_dx1 = L.Convolution(bottom, num_output=num_output, stride=1,
                             kernel_h=kernel_h, kernel_w=1,
                             weight_filler=weight_filler, bias_filler=bias_filler,
                             param=param)

    # 1 x kernel_w convolution.
    conv_1xd = L.Convolution(bottom, num_output=num_output, stride=1,
                             kernel_h=1, kernel_w=kernel_w,
                             weight_filler=weight_filler, bias_filler=bias_filler,
                             param=param)

    # Concat all the responses together.
    # For dx1, produce a dxd matrix.
    concat_dx1_dxd = L.Concat(*[conv_dx1] * kernel_w, concat_param=dict(axis=2))

    # For 1xd, produce a dxd matrix.
    concat_1xd_dxd = L.Concat(*[conv_1xd] * kernel_h, concat_param=dict(axis=3))

    # Sum the dxd matrices together element-wise.
    sum_dxd = L.Eltwise(concat_dx1_dxd, concat_1xd_dxd, eltwise_param=dict(operation=P.Eltwise.SUM))

    return sum_dxd
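The cross-shaped filter above sums a row convolution and a column convolution over a d x d matrix (an edge-to-edge style filter). A hypothetical call for a 90x90 input, assuming the function as defined above:

n = caffe.NetSpec()
n.data = L.Input(shape=dict(dim=[1, 1, 90, 90]))  # hypothetical 90x90 matrix input
n.e2e1 = e2e_conv(n.data, num_output=32, kernel_h=90, kernel_w=90)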
Example #4
def stem_v4_299x299(net, bottom):
    """
    input:3x299x299
    output:384x35x35
    :param bottom: bottom layer
    :return: layers
    """
    conv1_3x3_s2 = factorization_conv_bn_scale_relu(net, bottom, 'conv1_3x3_s2', num_output=32, kernel_size=3, stride=2)  # 32x149x149
    conv2_3x3_s1 = factorization_conv_bn_scale_relu(net, conv1_3x3_s2, 'conv2_3x3_s1', num_output=32, kernel_size=3, stride=1)  # 32x147x147
    conv3_3x3_s1 = factorization_conv_bn_scale_relu(net, conv2_3x3_s1, 'conv3_3x3_s1', num_output=64, kernel_size=3, stride=1, pad=1)  # 64x147x147

    net.inception_stem1_pool = L.Pooling(conv3_3x3_s1, kernel_size=3, stride=2, pool=P.Pooling.MAX)  # 64x73x73
    inception_stem1_3x3_s2 = factorization_conv_bn_scale_relu(net, conv3_3x3_s1, 'inception_stem1_3x3_s2', num_output=96, kernel_size=3, stride=2)  # 96x73x73
    net.inception_stem1 = L.Concat(net.inception_stem1_pool, inception_stem1_3x3_s2)  # 160x73x73

    inception_stem2_3x3_reduce = factorization_conv_bn_scale_relu(net, net.inception_stem1, 'inception_stem2_3x3_reduce', num_output=64, kernel_size=1)  # 64x73x73
    inception_stem2_3x3 = factorization_conv_bn_scale_relu(net, inception_stem2_3x3_reduce, 'inception_stem2_3x3', num_output=96, kernel_size=3)  # 96x71x71
    inception_stem2_7x1_reduce = factorization_conv_bn_scale_relu(net, net.inception_stem1, 'inception_stem2_7x1_reduce', num_output=64, kernel_size=1)  # 64x73x73
    inception_stem2_7x1 = factorization_conv_mxn(net, inception_stem2_7x1_reduce, 'inception_stem2_7x1', num_output=64, kernel_h=7, kernel_w=1, pad_h=3, pad_w=0)  # 64x73x73
    inception_stem2_1x7 = factorization_conv_mxn(net, inception_stem2_7x1, 'inception_stem2_1x7', num_output=64, kernel_h=1, kernel_w=7, pad_h=0, pad_w=3)  # 64x73x73
    inception_stem2_3x3_2 = factorization_conv_bn_scale_relu(net, inception_stem2_1x7, 'inception_stem2_3x3_2', num_output=96, kernel_size=3)  # 96x71x71
    
    net.inception_stem2 = L.Concat(inception_stem2_3x3, inception_stem2_3x3_2)  # 192x71x71

    inception_stem3_3x3_s2 = factorization_conv_bn_scale_relu(net, net.inception_stem2, 'inception_stem3_3x3_s2', num_output=192, stride=2)  # 192x35x35
    net.inception_stem3_pool = L.Pooling(net.inception_stem2, kernel_size=3, stride=2, pool=P.Pooling.MAX)  # 192x35x35
    inception_stem3 = L.Concat(inception_stem3_3x3_s2, net.inception_stem3_pool)  # 384x35x35

    return inception_stem3
Example #5
def MultiLayersDetectorHeader(net, data_layer="data", num_classes=2, from_layers=[], \
        normalizations=[], use_batchnorm=True, prior_variance=[0.1], \
        pro_widths=[], pro_heights=[], flip=True, clip=True, inter_layer_channels=[], \
        use_focus_loss=False, stage=1, lr_mult=1, decay_mult=1):
    assert num_classes, "must provide num_classes"
    assert num_classes > 0, "num_classes must be positive number"
    if normalizations:
        assert len(from_layers) == len(normalizations), "from_layers and normalizations should have same length"
    assert len(from_layers) == len(pro_widths), "from_layers and pro_widths should have same length"
    assert len(from_layers) == len(pro_heights), "from_layers and pro_heights should have same length"
    if inter_layer_channels:
        assert len(from_layers) == len(inter_layer_channels), "from_layers and inter_layer_channels should have the same length"
    net_layers = net.keys()
    assert data_layer in net_layers, "data_layer is not in net's layers"
    num = len(from_layers)
    priorbox_layers = []
    loc_layers = []
    conf_layers = []
    for i in range(0, num):
        # get feature layer
        from_layer = from_layers[i]
        # get sizes of prior-box layer
        prowidths = pro_widths[i] if type(pro_widths[i]) is list else [pro_widths[i]]
        proheights = pro_heights[i] if type(pro_heights[i]) is list else [pro_heights[i]]
        # get norm value
        normalization = -1
        if normalizations:
            normalization = normalizations[i]
        # get inter_layer_depth
        inter_layer_depth = 0
        if inter_layer_channels:
            inter_layer_depth = inter_layer_channels[i]
        loc_layer, conf_layer, priorbox_layer = \
            UnitLayerDetectorHeader(net, data_layer=data_layer, num_classes=num_classes, \
                feature_layer=from_layer, normalization=normalization, use_batchnorm=use_batchnorm, \
                prior_variance=prior_variance, pro_widths=prowidths, pro_heights=proheights, \
                flip=flip, clip=clip, inter_layer_channels=inter_layer_depth, \
                flat=True, use_focus_loss=use_focus_loss, stage=stage, lr_mult=lr_mult, decay_mult=decay_mult)
        loc_layers.append(loc_layer)
        conf_layers.append(conf_layer)
        priorbox_layers.append(priorbox_layer)
    # Concatenate priorbox, loc, and conf layers.
    mbox_layers = []
    name = "mbox_{}_loc".format(stage)
    net[name] = L.Concat(*loc_layers, axis=1)
    mbox_layers.append(net[name])
    name = "mbox_{}_conf".format(stage)
    net[name] = L.Concat(*conf_layers, axis=1)
    mbox_layers.append(net[name])
    name = "mbox_{}_priorbox".format(stage)
    net[name] = L.Concat(*priorbox_layers, axis=2)
    mbox_layers.append(net[name])
    return mbox_layers
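A hypothetical invocation, assuming net already holds 'data', 'conv4_3', and 'fc7' tops and that UnitLayerDetectorHeader is defined as in the source project:

mbox_layers = MultiLayersDetectorHeader(net, data_layer='data', num_classes=2,
                                        from_layers=['conv4_3', 'fc7'],
                                        pro_widths=[[0.1, 0.2], 0.3],
                                        pro_heights=[[0.1, 0.2], 0.3],
                                        inter_layer_channels=[64, 64],
                                        stage=1)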
Example #6
    def define_model(self):
        n = caffe.NetSpec()
        pylayer = 'SegDataLayer'

        pydata_params = dict(phase='train', img_root=opt.data_root,
                             batch_size=4, random=True)
        n.data, n.label = L.Python(module='data.SegDataLayer', layer=pylayer,
                                   ntop=2, param_str=str(pydata_params))
        n.conv1 = BasicConv(n.data, 32, is_train=self.is_train)  # (64,64,64)
        n.downsample1 = Inception_v1(n.conv1, 32, 32, is_train=self.is_train)  # (32,32,32)
        n.conv2 = BasicConv(n.downsample1, 64, is_train=self.is_train)
        n.downsample2 = Inception_v1(n.conv2, 64, 64, is_train=self.is_train)  # (16,16,16)
        n.conv3 = BasicConv(n.downsample2, 128, is_train=self.is_train)
        n.downsample3 = Inception_v1(n.conv3, 128, 128, is_train=self.is_train)  # (8,8,8)
        n.conv4 = BasicConv(n.downsample3, 256, is_train=self.is_train)
        n.downsample4 = Inception_v1(n.conv4, 256, 256, is_train=self.is_train)  # (4,4,4)

        n.conv4_ = SingleConv(n.downsample4, 128, is_train=self.is_train)
        n.incept4 = Inception_v2(n.conv4_, 128, 128, is_train=self.is_train)
        n.deconv4 = Deconv(n.incept4, 128, 128, is_train=self.is_train)  # (8,8,8)
        up4 = [n.deconv4, n.conv4]
        n.concat1_4 = L.Concat(*up4)

        n.conv5 = SingleConv(n.concat1_4, 128, is_train=self.is_train)
        n.incept5 = Inception_v2(n.conv5, 128, 128, is_train=self.is_train)
        n.deconv5 = Deconv(n.incept5, 128, 128, is_train=self.is_train)  # (16,16,16)
        up5 = [n.deconv5, n.conv3]
        n.concat1_5 = L.Concat(*up5)

        n.conv6 = SingleConv(n.concat1_5, 64, is_train=self.is_train)
        n.incept6 = Inception_v2(n.conv6, 64, 64, is_train=self.is_train)
        n.deconv6 = Deconv(n.incept6, 64, 64, is_train=self.is_train)  # (32,32,32)
        up6 = [n.deconv6, n.conv2]
        n.concat1_6 = L.Concat(*up6)

        n.conv7 = SingleConv(n.concat1_6, 32, is_train=self.is_train)
        n.incept7 = Inception_v2(n.conv7, 32, 32, is_train=self.is_train)
        n.deconv7 = Deconv(n.incept7, 32, 32, is_train=self.is_train)  # (64,64,64)
        up7 = [n.deconv7, n.conv1]
        n.concat1_7 = L.Concat(*up7)

        n.conv8 = SingleConv(n.concat1_7, 32, is_train=self.is_train)
        n.incept8 = Inception_v2(n.conv8, 32, 32, is_train=self.is_train)
        n.conv9 = L.Convolution(n.incept8, kernel_size=1, stride=1, pad=0,
                                num_output=1, weight_filler=dict(type='xavier'))
        n.probs = L.Sigmoid(n.conv9)
        n.probs_ = L.Flatten(n.probs)
        n.label_ = L.Flatten(n.label)
        # n.loss = L.SoftmaxWithLoss(n.conv9, n.label)
        # n.loss = L.Python(module='DiceLoss', layer="DiceLossLayer",
        #                   ntop=1, bottom=[n.probs, n.label])
        with open(self.model_def, 'w') as f:
            f.write(str(n.to_proto()))
Example #7
def stem_v4_299x299(bottom):
    """
    input:3x299x299
    output:384x35x35
    :param bottom: bottom layer
    :return: layers
    """
    conv1_3x3_s2, conv1_3x3_s2_bn, conv1_3x3_s2_scale, conv1_3x3_s2_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=32, kernel_size=3, stride=2)  # 32x149x149
    conv2_3x3_s1, conv2_3x3_s1_bn, conv2_3x3_s1_scale, conv2_3x3_s1_relu = \
        factorization_conv_bn_scale_relu(conv1_3x3_s2, num_output=32, kernel_size=3, stride=1)  # 32x147x147
    conv3_3x3_s1, conv3_3x3_s1_bn, conv3_3x3_s1_scale, conv3_3x3_s1_relu = \
        factorization_conv_bn_scale_relu(conv2_3x3_s1, num_output=64, kernel_size=3, stride=1, pad=1)  # 64x147x147

    inception_stem1_pool = L.Pooling(conv3_3x3_s1, kernel_size=3, stride=2, pool=P.Pooling.MAX)  # 64x73x73
    inception_stem1_3x3_s2, inception_stem1_3x3_s2_bn, inception_stem1_3x3_s2_scale, inception_stem1_3x3_s2_relu = \
        factorization_conv_bn_scale_relu(conv3_3x3_s1, num_output=96, kernel_size=3, stride=2)  # 96x73x73
    inception_stem1 = L.Concat(inception_stem1_pool, inception_stem1_3x3_s2)  # 160x73x73

    inception_stem2_3x3_reduce, inception_stem2_3x3_reduce_bn, inception_stem2_3x3_reduce_scale, \
    inception_stem2_3x3_reduce_relu = \
        factorization_conv_bn_scale_relu(inception_stem1, num_output=64, kernel_size=1)  # 64x73x73
    inception_stem2_3x3, inception_stem2_3x3_bn, inception_stem2_3x3_scale, inception_stem2_3x3_relu = \
        factorization_conv_bn_scale_relu(inception_stem2_3x3_reduce, num_output=96, kernel_size=3)  # 96x71x71
    inception_stem2_7x1_reduce, inception_stem2_7x1_reduce_bn, inception_stem2_7x1_reduce_scale, \
    inception_stem2_7x1_reduce_relu = \
        factorization_conv_bn_scale_relu(inception_stem1, num_output=64, kernel_size=1)  # 64x73x73
    inception_stem2_7x1, inception_stem2_7x1_bn, inception_stem2_7x1_scale, inception_stem2_7x1_relu = \
        factorization_conv_mxn(inception_stem2_7x1_reduce, num_output=64, kernel_h=7, kernel_w=1, pad_h=3,
                               pad_w=0)  # 64x73x73
    inception_stem2_1x7, inception_stem2_1x7_bn, inception_stem2_1x7_scale, inception_stem2_1x7_relu = \
        factorization_conv_mxn(inception_stem2_7x1, num_output=64, kernel_h=1, kernel_w=7, pad_h=0, pad_w=3)  # 64x73x73
    inception_stem2_3x3_2, inception_stem2_3x3_2_bn, inception_stem2_3x3_2_scale, inception_stem2_3x3_2_relu = \
        factorization_conv_bn_scale_relu(inception_stem2_1x7, num_output=96, kernel_size=3)  # 96x71x71
    inception_stem2 = L.Concat(inception_stem2_3x3, inception_stem2_3x3_2)  # 192x71x71

    inception_stem3_3x3_s2, inception_stem3_3x3_s2_bn, inception_stem3_3x3_s2_scale, inception_stem3_3x3_s2_relu = \
        factorization_conv_bn_scale_relu(inception_stem2, num_output=192, stride=2)  # 192x35x35
    inception_stem3_pool = L.Pooling(inception_stem2, kernel_size=3, stride=2, pool=P.Pooling.MAX)  # 192x35x35
    inception_stem3 = L.Concat(inception_stem3_3x3_s2, inception_stem3_pool)  # 384x35x35

    return conv1_3x3_s2, conv1_3x3_s2_bn, conv1_3x3_s2_scale, conv1_3x3_s2_relu, conv2_3x3_s1, conv2_3x3_s1_bn, \
           conv2_3x3_s1_scale, conv2_3x3_s1_relu, conv3_3x3_s1, conv3_3x3_s1_bn, conv3_3x3_s1_scale, conv3_3x3_s1_relu, \
           inception_stem1_3x3_s2, inception_stem1_3x3_s2_bn, inception_stem1_3x3_s2_scale, inception_stem1_3x3_s2_relu, \
           inception_stem1_pool, inception_stem1, inception_stem2_3x3_reduce, inception_stem2_3x3_reduce_bn, \
           inception_stem2_3x3_reduce_scale, inception_stem2_3x3_reduce_relu, inception_stem2_3x3, \
           inception_stem2_3x3_bn, inception_stem2_3x3_scale, inception_stem2_3x3_relu, inception_stem2_7x1_reduce, \
           inception_stem2_7x1_reduce_bn, inception_stem2_7x1_reduce_scale, inception_stem2_7x1_reduce_relu, \
           inception_stem2_7x1, inception_stem2_7x1_bn, inception_stem2_7x1_scale, inception_stem2_7x1_relu, \
           inception_stem2_1x7, inception_stem2_1x7_bn, inception_stem2_1x7_scale, inception_stem2_1x7_relu, \
           inception_stem2_3x3_2, inception_stem2_3x3_2_bn, inception_stem2_3x3_2_scale, inception_stem2_3x3_2_relu, \
           inception_stem2, inception_stem3_3x3_s2, inception_stem3_3x3_s2_bn, inception_stem3_3x3_s2_scale, \
           inception_stem3_3x3_s2_relu, inception_stem3_pool, inception_stem3
Example #8
def add_cnn(n, data, act, batch_size, T, K, num_step, mode='train'):
    n.x_flat = L.Flatten(data, axis=1, end_axis=2)
    n.act_flat = L.Flatten(act, axis=1, end_axis=2)
    if mode == 'train':
        x = L.Slice(n.x_flat, axis=1, ntop=T)
        act_slice = L.Slice(n.act_flat, axis=1, ntop=T - 1)
        x_set = ()
        label_set = ()
        x_hat_set = ()
        silence_set = ()
        for i in range(T):
            t = tag(i + 1)
            n.tops['x' + t] = x[i]
            if i < K:
                x_set += (x[i], )
            if i < T - 1:
                n.tops['act' + t] = act_slice[i]
            if i < K - 1:
                silence_set += (n.tops['act' + t], )
            if i >= K:
                label_set += (x[i], )
        n.label = L.Concat(*label_set, axis=0)
        input_list = list(x_set)
        for step in range(0, num_step):
            step_tag = tag(step + 1) if step > 0 else ''
            t = tag(step + K)
            tp = tag(step + K + 1)
            input_tuple = tuple(input_list)
            n.tops['input' + step_tag] = L.Concat(*input_tuple, axis=1)
            top = add_conv_enc(n, n.tops['input' + step_tag], tag=step_tag)
            n.tops['x_hat' + tp] = add_decoder(n,
                                               top,
                                               n.tops['act' + t],
                                               flatten=False,
                                               tag=step_tag)
            input_list.pop(0)
            input_list.append(n.tops['x_hat' + tp])
    else:
        top = add_conv_enc(n, n.x_flat)
        n.tops['x_hat' + tag(K + 1)] = add_decoder(n,
                                                   top,
                                                   n.act_flat,
                                                   flatten=False)
    if mode == 'train':
        x_hat = ()
        for i in range(K, T):
            t = tag(i + 1)
            x_hat += (n.tops['x_hat' + t], )
        n.x_hat = L.Concat(*x_hat, axis=0)
        n.silence = L.Silence(*silence_set, ntop=0)
        n.l2_loss = L.EuclideanLoss(n.x_hat, n.label)
    return n
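A sketch of driving this unrolled predictor, with illustrative shapes and assuming the tag, add_conv_enc, and add_decoder helpers from the source project (T frames, of which the first K are context):

n = caffe.NetSpec()
n.x = L.Input(shape=dict(dim=[16, 10, 3, 64, 64]))  # batch x T x C x H x W, illustrative
n.act = L.Input(shape=dict(dim=[16, 9, 10]))        # batch x (T-1) x action_dim, illustrative
n = add_cnn(n, n.x, n.act, batch_size=16, T=10, K=4, num_step=3, mode='train')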
Example #9
    def define_model(self):
        n = caffe.NetSpec()
        n.data, n.label = L.Data(batch_size=opt.batch_size,
                                 backend=P.Data.LMDB,
                                 source=opt.lmdb_path,
                                 transform_param=dict(scale=1. / 255),
                                 ntop=2)
        n.conv1 = BasicConv(n.data, 32)  #(64,64,64)
        n.downsample1 = Inception_v1(n.conv1, 32, 32)  #(32,32,32,32)
        n.conv2 = BasicConv(n.downsample1, 64)
        n.downsample2 = Inception_v1(n.conv2, 64, 64)  #(16,16,16,16)
        n.conv3 = BasicConv(n.downsample2, 128)
        n.downsample3 = Inception_v1(n.conv3, 128, 128)  #(8,8,8)
        n.conv4 = BasicConv(n.downsample3, 256)
        n.downsample4 = Inception_v1(n.conv4, 256, 256)  #(4,4,4)

        n.conv4_ = SingleConv(n.downsample4, 128)
        n.incept4 = Inception_v2(n.conv4_, 128, 128)
        n.deconv4 = Deconv(n.incept4, 128)  #(8,8,8)
        up4 = [n.deconv4, n.conv4]
        n.concat4 = L.Concat(*up4, in_place=True)

        n.conv5 = SingleConv(n.concat4, 128)
        n.incept5 = Inception_v2(n.conv5, 128, 128)
        n.deconv5 = Deconv(n.incept5, 128, 128)  #(16,16,16)
        up5 = [n.deconv5, n.conv3]
        n.concat5 = L.Concat(*up5, in_place=True)

        n.conv6 = SingleConv(n.concat5, 64)
        n.incept6 = Inception_v2(n.conv6, 64, 64)
        n.deconv6 = Deconv(n.incept6, 64, 64)  #(32,32,32)
        up6 = [n.deconv6, n.conv2]
        n.concat6 = L.Concat(*up6, in_place=True)

        n.conv7 = SingleConv(n.concat6, 32)
        n.incept7 = Inception_v2(n.conv7, 32, 32)
        n.deconv7 = Deconv(n.incept7, 32, 32)  #(64,64,64)
        up7 = [n.deconv7, n.conv1]
        n.concat7 = L.Concat(*up7, in_place=True)

        n.conv8 = SingleConv(n.concat7, 32)
        n.incept8 = Inception_v2(n.conv8, 32, 32)
        n.conv9 = L.Convolution(n.incept8,
                                kernel_size=1,
                                stride=1,
                                pad=0,
                                num_output=1,
                                weight_filler=dict(type='xavier'))
        n.loss = L.SoftmaxWithLoss(n.conv9, n.label)
        with open(self.model_def, 'w') as f:
            f.write(str(n.to_proto()))
Example #10
def trim_dense_block(netspec,
                     name_prefix,
                     bottom,
                     layer_output=16,
                     dropout=0,
                     phase='train'):
    if phase == 'train':
        use_global_stats = False
    else:
        use_global_stats = True

    current_bottom = conv3d(netspec,
                            name_prefix + '_conv1',
                            bottom,
                            num_output=layer_output,
                            dropout=dropout)
    concat1 = netspec[name_prefix + '_concat1'] = L.Concat(bottom,
                                                           current_bottom,
                                                           axis=1)

    current_bottom = conv3d(netspec,
                            name_prefix + '_conv2',
                            current_bottom,
                            num_output=layer_output,
                            dropout=dropout)
    concat2 = netspec[name_prefix + '_concat2'] = L.Concat(concat1,
                                                           current_bottom,
                                                           axis=1)

    current_bottom = conv3d(netspec,
                            name_prefix + '_conv3',
                            current_bottom,
                            num_output=layer_output,
                            dropout=dropout)
    concat3 = netspec[name_prefix + '_concat3'] = L.Concat(concat2,
                                                           current_bottom,
                                                           axis=1)

    current_bottom = conv3d(netspec,
                            name_prefix + '_conv4',
                            current_bottom,
                            num_output=layer_output,
                            dropout=dropout)
    current_bottom = netspec[name_prefix + '_concat4'] = L.Concat(
        concat3, current_bottom, axis=1)

    # name_prefix + 'concat4'
    # return name_prefix+'concat4'
    return current_bottom
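A hypothetical call, assuming the conv3d helper used above and a 3-D input blob:

netspec = caffe.NetSpec()
netspec.data = L.Input(shape=dict(dim=[1, 1, 32, 96, 96]))  # hypothetical 3-D volume
top = trim_dense_block(netspec, 'dense1', netspec.data,
                       layer_output=16, dropout=0.2, phase='train')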
Example #11
def inception_v3_7e(bottom, conv_output):
    conv_1x1, conv_1x1_bn, conv_1x1_relu = \
        conv_bn_relu(bottom, dict(kernel_size=1, num_output=conv_output['conv_1x1'], stride=1, pad=0, group=1,
                                  weight_type='xavier', weight_std=0.01, bias_type='constant', bias_value=0))
    conv_3x3_0_reduce, conv_3x3_0_reduce_bn, conv_3x3_0_reduce_relu = \
        conv_bn_relu(bottom, dict(kernel_size=1, num_output=conv_output['conv_3x3_0_reduce'], stride=1, pad=0, group=1,
                                  weight_type='xavier', weight_std=0.01, bias_type='constant', bias_value=0))
    conv_1x3_0, conv_1x3_0_bn, conv_1x3_0_relu, conv_3x1_0, conv_3x1_0_bn, conv_3x1_0_relu = \
        factorization_conv(conv_3x3_0_reduce_bn, kernel=(1, 3), pad=(0, 1),
                           output=(conv_output['conv_1x3_0'], conv_output['conv_3x1_0']))
    conv_3x3_1_reduce, conv_3x3_1_reduce_bn, conv_3x3_1_reduce_relu = \
        conv_bn_relu(bottom, dict(kernel_size=1, num_output=conv_output['conv_3x3_1_reduce'], stride=1, pad=0, group=1,
                                  weight_type='xavier', weight_std=0.01, bias_type='constant', bias_value=0))
    conv_3x3_1, conv_3x3_1_bn, conv_3x3_1_relu = \
        conv_bn_relu(conv_3x3_1_reduce_bn, dict(kernel_size=3, num_output=conv_output['conv_3x3_1'], stride=1, pad=1,
                                                group=1, weight_type='xavier', weight_std=0.01, bias_type='constant',
                                                bias_value=0))
    conv_1x3_1, conv_1x3_1_bn, conv_1x3_1_relu, conv_3x1_1, conv_3x1_1_bn, conv_3x1_1_relu = \
        factorization_conv(conv_3x3_1_bn, kernel=(1, 3), pad=(0, 1),
                           output=(conv_output['conv_1x3_1'], conv_output['conv_3x1_1']))
    pool = L.Pooling(bottom,
                     kernel_size=3,
                     stride=1,
                     pad=1,
                     pool=conv_output['pooling'])
    pool_proj, pool_proj_bn, pool_proj_relu = \
        conv_bn_relu(pool, dict(kernel_size=1, num_output=conv_output['pool_proj'], stride=1, pad=0, group=1,
                                weight_type='xavier', weight_std=0.01, bias_type='constant', bias_value=0))
    concat = L.Concat(conv_1x1_bn, conv_3x1_0_bn, conv_3x1_1_bn, pool_proj_bn)

    return conv_1x1, conv_1x1_bn, conv_1x1_relu, conv_3x3_0_reduce, conv_3x3_0_reduce_bn, conv_3x3_0_reduce_relu, \
           conv_1x3_0, conv_1x3_0_bn, conv_1x3_0_relu, conv_3x1_0, conv_3x1_0_bn, conv_3x1_0_relu, conv_3x3_1_reduce, \
           conv_3x3_1_reduce_bn, conv_3x3_1_reduce_relu, conv_3x3_1, conv_3x3_1_bn, conv_3x3_1_relu, conv_1x3_1, \
           conv_1x3_1_bn, conv_1x3_1_relu, conv_3x1_1, conv_3x1_1_bn, conv_3x1_1_relu, pool, pool_proj, pool_proj_bn, \
           pool_proj_relu, concat
Example #12
def route_layer(previous, name, params, train=False):
    """Create the route (Concat) layer of YOLO."""
    common_layers = previous[0]
    conv_layers = previous[1]
    if "layers " in params.keys():  # the cfg parser keeps a trailing space in this key
        layers_str = params["layers "]
        lstr_splited = layers_str.split(', ')
        layer_size = len(lstr_splited)
        if layer_size == 1:
            layer_index = int(lstr_splited[0])
            return cl.Concat(conv_layers[layer_index], name=name)
        elif layer_size == 2:
            layer_index1 = int(lstr_splited[0])
            layer_index2 = int(lstr_splited[1])
            return cl.Concat(conv_layers[layer_index1], conv_layers[layer_index2 - 1],
                             name=name)
Example #13
def reduction_v3_a(bottom):
    """
    input:288x35x35
    output:768x17x17
    :param bottom: bottom layer
    :return: layers
    """
    pool = L.Pooling(bottom, kernel_size=3, stride=2,
                     pool=P.Pooling.MAX)  # 384x17x17

    conv_3x3, conv_3x3_bn, conv_3x3_scale, conv_3x3_relu = \
        factorization_conv_bn_scale_relu(bottom, kernel_size=3, num_output=384, stride=2)  # 384x17x17

    conv_3x3_2_reduce, conv_3x3_2_reduce_bn, conv_3x3_2_reduce_scale, conv_3x3_2_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=64, kernel_size=1)  # 64x35x35
    conv_3x3_2, conv_3x3_2_bn, conv_3x3_2_scale, conv_3x3_2_relu = \
        factorization_conv_bn_scale_relu(conv_3x3_2_reduce, num_output=96, kernel_size=3, pad=1)  # 96x35x35
    conv_3x3_3, conv_3x3_3_bn, conv_3x3_3_scale, conv_3x3_3_relu = \
        factorization_conv_bn_scale_relu(conv_3x3_2, num_output=96, kernel_size=3, stride=2)  # 96x17x17

    concat = L.Concat(pool, conv_3x3, conv_3x3_3)  # 768(288+384+96)x17x17

    return pool, conv_3x3, conv_3x3_bn, conv_3x3_scale, conv_3x3_relu, conv_3x3_2_reduce, conv_3x3_2_reduce_bn, \
           conv_3x3_2_reduce_scale, conv_3x3_2_reduce_relu, conv_3x3_2, conv_3x3_2_bn, conv_3x3_2_scale, \
           conv_3x3_2_relu, conv_3x3_3, conv_3x3_3_bn, conv_3x3_3_scale, conv_3x3_3_relu, concat
Example #14
def inception_resnet_v2_c(bottom):
    """
    input:2080x8x8
    output:2080x8x8
    :param bottom: bottom layer
    :return: layers
    """
    conv_1x1, conv_1x1_bn, conv_1x1_scale, conv_1x1_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=192, kernel_size=1)  # 192x8x8

    conv_1x3_reduce, conv_1x3_reduce_bn, conv_1x3_reduce_scale, conv_1x3_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=192, kernel_size=1)  # 192x8x8
    conv_1x3, conv_1x3_bn, conv_1x3_scale, conv_1x3_relu = \
        factorization_conv_mxn(conv_1x3_reduce, num_output=224, kernel_h=1, kernel_w=3, pad_h=0, pad_w=1)  # 224x8x8
    conv_3x1, conv_3x1_bn, conv_3x1_scale, conv_3x1_relu = \
        factorization_conv_mxn(conv_1x3, num_output=256, kernel_h=3, kernel_w=1, pad_h=1, pad_w=0)  # 256x8x8

    concat = L.Concat(conv_1x1, conv_3x1)  # 448(192+256)x8x8
    conv_up, conv_up_bn, conv_up_scale = \
        factorization_conv_bn_scale(concat, num_output=2080, kernel_size=1)  # 2080x8x8 

    residual_eltwise, residual_eltwise_relu = eltwise_relu(bottom, conv_up)  # 2080x8x8  

    return conv_1x1, conv_1x1_bn, conv_1x1_scale, conv_1x1_relu, conv_1x3_reduce, conv_1x3_reduce_bn, \
           conv_1x3_reduce_scale, conv_1x3_reduce_relu, conv_1x3, conv_1x3_bn, conv_1x3_scale, conv_1x3_relu, \
           conv_3x1, conv_3x1_bn, conv_3x1_scale, conv_3x1_relu, concat, conv_up, conv_up_bn, conv_up_scale, \
           residual_eltwise, residual_eltwise_relu
Example #15
def inception_resnet_v2_b(bottom):
    """
    input:1088x17x17
    output:1088x17x17
    :param bottom: bottom layer
    :return: layers
    """
    conv_1x1, conv_1x1_bn, conv_1x1_scale, conv_1x1_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=192, kernel_size=1)  # 192x17x17

    conv_1x7_reduce, conv_1x7_reduce_bn, conv_1x7_reduce_scale, conv_1x7_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=128, kernel_size=1)  # 128x17x17
    conv_1x7, conv_1x7_bn, conv_1x7_scale, conv_1x7_relu = \
        factorization_conv_mxn(conv_1x7_reduce, num_output=160, kernel_h=1, kernel_w=7, pad_h=0, pad_w=3)  # 160x17x17
    conv_7x1, conv_7x1_bn, conv_7x1_scale, conv_7x1_relu = \
        factorization_conv_mxn(conv_1x7, num_output=192, kernel_h=7, kernel_w=1, pad_h=3, pad_w=0)  # 192x17x17

    concat = L.Concat(conv_1x1, conv_7x1)  # 384(192+192)x17x17
    conv_up, conv_up_bn, conv_up_scale = \
        factorization_conv_bn_scale(concat, num_output=1088, kernel_size=1)  # 1088x17x17 

    residual_eltwise, residual_eltwise_relu = eltwise_relu(bottom, conv_up)  # 1088x17x17

    return conv_1x1, conv_1x1_bn, conv_1x1_scale, conv_1x1_relu, conv_1x7_reduce, conv_1x7_reduce_bn, \
           conv_1x7_reduce_scale, conv_1x7_reduce_relu, conv_1x7, conv_1x7_bn, conv_1x7_scale, conv_1x7_relu, \
           conv_7x1, conv_7x1_bn, conv_7x1_scale, conv_7x1_relu, concat, conv_up, conv_up_bn, conv_up_scale, \
           residual_eltwise, residual_eltwise_relu
Example #16
def lenet():
    # our version of LeNet: a series of linear and simple nonlinear transformations
    n = caffe.NetSpec()
    n["data"] = L.Input(shape=[dict(dim=[1, 3, 1, 1])], ntop=1)
    n.conv1 = L.Convolution(n.data,
                            kernel_size=1,
                            num_output=4,
                            weight_filler=dict(type='gaussian'))
    n.reshape1a = L.Reshape(n.conv1,
                            reshape_param={'shape': {'dim': [0, -1]}})
    n.conv2 = L.Convolution(n.data,
                            kernel_size=1,
                            num_output=4,
                            weight_filler=dict(type='gaussian'))
    n.reshape2a = L.Reshape(n.conv2,
                            reshape_param={'shape': {'dim': [0, -1]}})

    n.concat3 = L.Concat(n.reshape1a, n.reshape2a, concat_param=dict(axis=-1))

    return n.to_proto()
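Since lenet() returns the generated NetParameter, writing the prototxt out is just a str() away:

with open('toy_net.prototxt', 'w') as f:
    f.write(str(lenet()))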
Example #17
    def test_concat4(self):
        n = caffe.NetSpec()
        n.input1 = L.Input(shape=make_shape([6, 4, 64, 64]))
        n.input2 = L.Input(shape=make_shape([8, 4, 64, 64]))
        n.input3 = L.Input(shape=make_shape([10, 4, 64, 64]))
        n.concat1 = L.Concat(n.input1, n.input2, n.input3, concat_dim=0)
        self._test_model(*self._netspec_to_model(n, 'concat4'))
Example #18
    def test_concat2(self):
        n = caffe.NetSpec()
        n.input1 = L.Input(shape=make_shape([10, 4, 64, 64]))
        n.input2 = L.Input(shape=make_shape([10, 6, 64, 64]))
        n.input3 = L.Input(shape=make_shape([10, 8, 64, 64]))
        n.concat1 = L.Concat(n.input1, n.input2, n.input3, axis=1)
        self._test_model(*self._netspec_to_model(n, 'concat2'))
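The two tests above exercise both spellings of the concatenation axis: concat_dim is the legacy field in ConcatParameter and axis the current one; to my understanding the two calls below are equivalent (make_shape is the tests' own helper):

n = caffe.NetSpec()
n.a = L.Input(shape=make_shape([10, 4, 64, 64]))
n.b = L.Input(shape=make_shape([10, 6, 64, 64]))
n.cat_legacy = L.Concat(n.a, n.b, concat_dim=1)  # deprecated field name
n.cat_axis = L.Concat(n.a, n.b, axis=1)          # current field name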
Example #19
def reduction_resnet_v2_a(bottom):
    """
    input:320x35x35
    output:1088x17x17
    :param bottom: bottom layer
    :return: layers
    """
    conv_3x3, conv_3x3_bn, conv_3x3_scale, conv_3x3_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=384, kernel_size=3, stride=2)  # 384x17x17

    conv_3x3_2_reduce, conv_3x3_2_reduce_bn, conv_3x3_2_reduce_scale, conv_3x3_2_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=256, kernel_size=1)  # 256x35x35
    conv_3x3_2, conv_3x3_2_bn, conv_3x3_2_scale, conv_3x3_2_relu = \
        factorization_conv_bn_scale_relu(conv_3x3_2_reduce, num_output=256, kernel_size=3, stride=1, pad=1)  # 256x35x35
    conv_3x3_3, conv_3x3_3_bn, conv_3x3_3_scale, conv_3x3_3_relu = \
        factorization_conv_bn_scale_relu(conv_3x3_2, num_output=384, kernel_size=3, stride=2)  # 384x17x17

    pool = L.Pooling(bottom, kernel_size=3, stride=2,
                     pool=P.Pooling.MAX)  # 320x17x17

    concat = L.Concat(conv_3x3, conv_3x3_3, pool)  # 1088(320+384+384)x17x17

    return conv_3x3, conv_3x3_bn, conv_3x3_scale, conv_3x3_relu, conv_3x3_2_reduce, conv_3x3_2_reduce_bn, \
           conv_3x3_2_reduce_scale, conv_3x3_2_reduce_relu, conv_3x3_2, conv_3x3_2_bn, conv_3x3_2_scale, \
           conv_3x3_2_relu, conv_3x3_3, conv_3x3_3_bn, conv_3x3_3_scale, conv_3x3_3_relu, pool, concat
Example #20
def DenseBlock(net, from_layer, out_layer, **bn_param):
    conv_prefix = ''
    conv_postfix = ''
    bn_prefix = ''
    bn_postfix = '/bn'
    scale_prefix = ''
    scale_postfix = '/scale'
    use_scale = True

    out_name = out_layer + '/x1'
    NewConvBNLayer(net, from_layer, out_name, use_conv=True, use_bn=True, use_relu=True,
                   num_output=128, kernel_size=1, pad=0, stride=1,
                   conv_prefix=conv_prefix, conv_postfix=conv_postfix,
                   bn_prefix=bn_prefix, bn_postfix=bn_postfix,
                   scale_prefix=scale_prefix, scale_postfix=scale_postfix, **bn_param)

    out_name = out_layer + '/x2'
    NewConvBNLayer(net, net.keys()[-1], out_name, use_conv=True, use_bn=True, use_relu=True,
                   num_output=32, kernel_size=3, pad=1, stride=1,
                   conv_prefix=conv_prefix, conv_postfix=conv_postfix,
                   bn_prefix=bn_prefix, bn_postfix=bn_postfix,
                   scale_prefix=scale_prefix, scale_postfix=scale_postfix, **bn_param)

    block_name = 'concat_{}'.format(out_layer[4:])  # for example, 'conv3_1' to '3_1'
    net[block_name] = L.Concat(net[from_layer], net[net.keys()[-1]])
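A hypothetical use, assuming NewConvBNLayer is defined and the net already contains a 'conv3_1' top; per the naming comment, out_layer='conv3_2' yields a concat top named 'concat_3_2':

DenseBlock(net, 'conv3_1', 'conv3_2')  # **bn_param would forward BatchNorm settings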
Example #21
def inception_resnet_v2_a(bottom):
    """
    input:320x35x35
    output:320x35x35
    :param bottom: bottom layer
    :return: layers
    """
    conv_1x1, conv_1x1_bn, conv_1x1_scale, conv_1x1_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=32, kernel_size=1)  # 32x35x35

    conv_3x3_reduce, conv_3x3_reduce_bn, conv_3x3_reduce_scale, conv_3x3_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=32, kernel_size=1)  # 32x35x35
    conv_3x3, conv_3x3_bn, conv_3x3_scale, conv_3x3_relu = \
        factorization_conv_bn_scale_relu(conv_3x3_reduce, num_output=32, kernel_size=3, pad=1)  # 32x35x35

    conv_3x3_2_reduce, conv_3x3_2_reduce_bn, conv_3x3_2_reduce_scale, conv_3x3_2_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=32, kernel_size=1)  # 32x35x35
    conv_3x3_2, conv_3x3_2_bn, conv_3x3_2_scale, conv_3x3_2_relu = \
        factorization_conv_bn_scale_relu(conv_3x3_2_reduce, num_output=48, kernel_size=3, pad=1)  # 48x35x35
    conv_3x3_3, conv_3x3_3_bn, conv_3x3_3_scale, conv_3x3_3_relu = \
        factorization_conv_bn_scale_relu(conv_3x3_2, num_output=64, kernel_size=3, pad=1)  # 64x35x35

    concat = L.Concat(conv_1x1, conv_3x3, conv_3x3_3)  # 128(32+32+64)x35x35
    conv_up, conv_up_bn, conv_up_scale = \
        factorization_conv_bn_scale(concat, num_output=320, kernel_size=1)  # 320x35x35

    residual_eltwise, residual_eltwise_relu = eltwise_relu(bottom, conv_up)  # 320x35x35

    return conv_1x1, conv_1x1_bn, conv_1x1_scale, conv_1x1_relu, conv_3x3_reduce, conv_3x3_reduce_bn, \
           conv_3x3_reduce_scale, conv_3x3_reduce_relu, conv_3x3, conv_3x3_bn, conv_3x3_scale, conv_3x3_relu, \
           conv_3x3_2_reduce, conv_3x3_2_reduce_bn, conv_3x3_2_reduce_scale, conv_3x3_2_reduce_relu, \
           conv_3x3_2, conv_3x3_2_bn, conv_3x3_2_scale, conv_3x3_2_relu, conv_3x3_3, conv_3x3_3_bn, conv_3x3_3_scale, \
           conv_3x3_3_relu, concat, conv_up, conv_up_bn, conv_up_scale, residual_eltwise, residual_eltwise_relu
Example #22
def example_use_of_lstm():
  T = 3 # number of time steps
  B = 10 # batch size
  lstm_output = 500 # dimension of LSTM unit

  # use net spec
  ns = caffe.NetSpec()

  # we need initial values for h and c
  ns.h0 = L.DummyData(name='h0', dummy_data_param={'shape':{'dim':[1,B,lstm_output]},
                               'data_filler':{'type':'constant','value':0}})

  ns.c0 = L.DummyData(name='c0', dummy_data_param={'shape':{'dim':[1,B,lstm_output]},
                                   'data_filler':{'type':'constant','value':0}})

  # simulate input X over T time steps and B sequences (batch size)
  ns.X = L.DummyData(name='X', dummy_data_param={'shape': {'dim':[T,B,128,10,10]}} )
  # slice X for T time steps
  xt = L.Slice(ns.X, name='slice_X', ntop=T, slice_param={'axis': 0, 'slice_point': list(range(1, T))})
  # unroling
  h = ns.h0
  c = ns.c0
  lstm_weights = None
  tops = []
  for t in range(T):
    c, h, lstm_weights = single_time_step_lstm( ns, h, c, xt[t], 't'+str(t)+'/', lstm_output, lstm_weights)
    tops.append(h)
    ns.__setattr__('c'+str(t),c)
    ns.__setattr__('h'+str(t),h)
  # concat all LSTM tops (h[t]) to a single layer
  ns.H = L.Concat(*tops, name='concat_h', concat_param={'axis': 0})
  return ns
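The returned NetSpec serializes like any other; single_time_step_lstm is assumed to be defined alongside this function:

ns = example_use_of_lstm()
with open('unrolled_lstm.prototxt', 'w') as f:
    f.write(str(ns.to_proto()))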
Example #23
def reduction_resnet_v2_b(bottom):
    """
    input:1088x17x17
    output:2080x8x8
    :param bottom: bottom layer
    :return: layers
    """
    conv_3x3_reduce, conv_3x3_reduce_bn, conv_3x3_reduce_scale, conv_3x3_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=256, kernel_size=1)  # 256x17x17
    conv_3x3, conv_3x3_bn, conv_3x3_scale, conv_3x3_relu = \
        factorization_conv_bn_scale_relu(conv_3x3_reduce, num_output=384, kernel_size=3, stride=2)  # 384x8x8

    conv_3x3_2_reduce, conv_3x3_2_reduce_bn, conv_3x3_2_reduce_scale, conv_3x3_2_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=256, kernel_size=1)  # 256x17x17
    conv_3x3_2, conv_3x3_2_bn, conv_3x3_2_scale, conv_3x3_2_relu = \
        factorization_conv_bn_scale_relu(conv_3x3_2_reduce, num_output=288, kernel_size=3, stride=2)  # 288x8x8

    conv_3x3_3_reduce, conv_3x3_3_reduce_bn, conv_3x3_3_reduce_scale, conv_3x3_3_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=256, kernel_size=1)  # 256x17x17
    conv_3x3_3, conv_3x3_3_bn, conv_3x3_3_scale, conv_3x3_3_relu = \
        factorization_conv_bn_scale_relu(conv_3x3_3_reduce, num_output=288, kernel_size=3, pad=1)  # 288x17x17
    conv_3x3_4, conv_3x3_4_bn, conv_3x3_4_scale, conv_3x3_4_relu = \
        factorization_conv_bn_scale_relu(conv_3x3_3, num_output=320, kernel_size=3, stride=2)  # 320x8x8

    pool = L.Pooling(bottom, kernel_size=3, stride=2, pool=P.Pooling.MAX)  # 1088x8x8

    concat = L.Concat(conv_3x3, conv_3x3_2, conv_3x3_4, pool)  # 2080(1088+384+288+320)x8x8 

    return conv_3x3_reduce, conv_3x3_reduce_bn, conv_3x3_reduce_scale, conv_3x3_reduce_relu, conv_3x3, \
           conv_3x3_bn, conv_3x3_scale, conv_3x3_relu, conv_3x3_2_reduce, conv_3x3_2_reduce_bn, \
           conv_3x3_2_reduce_scale, conv_3x3_2_reduce_relu, conv_3x3_2, conv_3x3_2_bn, conv_3x3_2_scale, \
           conv_3x3_2_relu, conv_3x3_3_reduce, conv_3x3_3_reduce_bn, conv_3x3_3_reduce_scale, conv_3x3_3_reduce_relu, \
           conv_3x3_3, conv_3x3_3_bn, conv_3x3_3_scale, conv_3x3_3_relu, conv_3x3_4, conv_3x3_4_bn, conv_3x3_4_scale, \
           conv_3x3_4_relu, pool, concat
Example #24
def join_layer(layer_config, bottom_name):
    '''Create a Concat layer joining the listed bottom blobs.'''
    bottom = bottom_name.split(',')
    bottom.pop()  # drop the empty string produced by the trailing comma
    return L.Concat(name=layer_config['name'],
                    ntop=0, top=layer_config['name'],
                    bottom=bottom)
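The pop() above exists because bottom_name arrives with a trailing comma; a hypothetical call:

# 'conv3,conv5,' splits into ['conv3', 'conv5', ''] and pop() drops the ''
layer = join_layer({'name': 'join1'}, 'conv3,conv5,')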
Example #25
def upsample_merge(n, data, to_upsample, width, iters):
    deconv = L.Deconvolution(to_upsample,
                             convolution_param=dict(num_output=width,
                                                    kernel_size=2,
                                                    stride=2,
                                                    weight_filler=get_filler(),
                                                    bias_filler=dict([
                                                        ('type', 'constant'),
                                                        ('value', 0)
                                                    ])),
                             param=[{
                                 "lr_mult": 1,
                                 "decay_mult": 1
                             }, {
                                 "lr_mult": 2,
                                 "decay_mult": 0
                             }])
    prelu = L.PReLU(deconv)
    concat = L.Concat(data, prelu)
    netset(n, "concat_up_" + str(width), concat)

    left = left_branch(concat, width * 2, iters)
    netset(n, "left_branch_up_" + str(width), left)

    eltwise = L.Eltwise(concat, left, operation=P.Eltwise.SUM)
    netset(n, "elementwise_up_" + str(width), eltwise)
    return L.PReLU(eltwise, in_place=True)
Example #26
def inception_v3_7d(bottom, conv_output):
    conv_3x3_reduce, conv_3x3_reduce_bn, conv_3x3_reduce_relu = \
        conv_bn_relu(bottom, dict(kernel_size=1, num_output=conv_output['conv_3x3_reduce'], stride=1, pad=0, group=1,
                                  weight_type='xavier', weight_std=0.01, bias_type='constant', bias_value=0))
    conv_3x3_0, conv_3x3_0_bn, conv_3x3_0_relu = \
        conv_bn_relu(conv_3x3_reduce_bn, dict(kernel_size=3, num_output=conv_output['conv_3x3_0'], stride=2, pad=0,
                                              group=1, weight_type='xavier', weight_std=0.01, bias_type='constant',
                                              bias_value=0))
    conv_1x7_reduce, conv_1x7_reduce_bn, conv_1x7_reduce_relu = \
        conv_bn_relu(bottom, dict(kernel_size=1, num_output=conv_output['conv_1x7_reduce'], stride=1, pad=0, group=1,
                                  weight_type='xavier', weight_std=0.01, bias_type='constant', bias_value=0))
    conv_1x7, conv_1x7_bn, conv_1x7_relu, conv_7x1, conv_7x1_bn, conv_7x1_relu = \
        factorization_conv(conv_1x7_reduce_bn, output=(conv_output['conv_1x7'], conv_output['conv_7x1']))
    conv_3x3_1, conv_3x3_1_bn, conv_3x3_1_relu = \
        conv_bn_relu(conv_7x1_bn, dict(kernel_size=3, num_output=conv_output['conv_3x3_1'], stride=2, pad=0,
                                       group=1, weight_type='xavier', weight_std=0.01, bias_type='constant',
                                       bias_value=0))
    pool = L.Pooling(bottom,
                     kernel_size=3,
                     stride=2,
                     pad=0,
                     pool=P.Pooling.MAX)
    concat = L.Concat(conv_3x3_0_bn, conv_3x3_1_bn, pool)

    return conv_3x3_reduce, conv_3x3_reduce_bn, conv_3x3_reduce_relu, conv_3x3_0, conv_3x3_0_bn, conv_3x3_0_relu, \
           conv_1x7_reduce, conv_1x7_reduce_bn, conv_1x7_reduce_relu, conv_1x7, conv_1x7_bn, conv_1x7_relu, conv_7x1, \
           conv_7x1_bn, conv_7x1_relu, conv_3x3_1, conv_3x3_1_bn, conv_3x3_1_relu, pool, concat
Example #27
def reduction_v3_b(bottom):
    """
    input:768x17x17
    output:1280x8x8
    :param bottom: bottom layer
    :return: layers
    """
    pool = L.Pooling(bottom, kernel_size=3, stride=2,
                     pool=P.Pooling.MAX)  # 768x8x8

    conv_3x3_reduce, conv_3x3_reduce_bn, conv_3x3_reduce_scale, conv_3x3_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=192, kernel_size=1)  # 192x17x17
    conv_3x3, conv_3x3_bn, conv_3x3_scale, conv_3x3_relu = \
        factorization_conv_bn_scale_relu(conv_3x3_reduce, num_output=320, kernel_size=3, stride=2)  # 320x8x8

    conv_1x7_reduce, conv_1x7_reduce_bn, conv_1x7_reduce_scale, conv_1x7_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=192, kernel_size=1)  # 192x17x17
    conv_1x7, conv_1x7_bn, conv_1x7_scale, conv_1x7_relu = \
        factorization_conv_mxn(conv_1x7_reduce, num_output=192, kernel_h=1, kernel_w=7, pad_h=0, pad_w=3)  # 192x17x17
    conv_7x1, conv_7x1_bn, conv_7x1_scale, conv_7x1_relu = \
        factorization_conv_mxn(conv_1x7, num_output=192, kernel_h=7, kernel_w=1, pad_h=3, pad_w=0)  # 192x17x17
    conv_3x3_2, conv_3x3_2_bn, conv_3x3_2_scale, conv_3x3_2_relu = \
        factorization_conv_bn_scale_relu(conv_7x1, num_output=192, kernel_size=3, stride=2)  # 192x8x8

    concat = L.Concat(pool, conv_3x3, conv_3x3_2)  # 1280(768+320+192)x8x8

    return pool, conv_3x3_reduce, conv_3x3_reduce_bn, conv_3x3_reduce_scale, conv_3x3_reduce_relu, conv_3x3, conv_3x3_bn, \
           conv_3x3_scale, conv_3x3_relu, conv_1x7_reduce, conv_1x7_reduce_bn, conv_1x7_reduce_scale, conv_1x7_reduce_relu, \
           conv_1x7, conv_1x7_bn, conv_1x7_scale, conv_1x7_relu, conv_7x1, conv_7x1_bn, conv_7x1_scale, conv_7x1_relu, \
           conv_3x3_2, conv_3x3_2_bn, conv_3x3_2_scale, conv_3x3_2_relu, concat
Example #28
def inception_bn(bottom, conv_output):
    conv_1x1, conv_1x1_bn, conv_1x1_scale, conv_1x1_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=conv_output['conv_1x1'], kernel_size=1)

    conv_3x3_reduce, conv_3x3_reduce_bn, conv_3x3_reduce_scale, conv_3x3_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=conv_output['conv_3x3_reduce'], kernel_size=1)
    conv_3x3, conv_3x3_bn, conv_3x3_scale, conv_3x3_relu = \
        factorization_conv_bn_scale_relu(conv_3x3_reduce, num_output=conv_output['conv_3x3'], kernel_size=3, pad=1)

    conv_5x5_reduce, conv_5x5_reduce_bn, conv_5x5_reduce_scale, conv_5x5_reduce_relu = \
        factorization_conv_bn_scale_relu(bottom, num_output=conv_output['conv_5x5_reduce'], kernel_size=1)
    conv_5x5, conv_5x5_bn, conv_5x5_scale, conv_5x5_relu = \
        factorization_conv_bn_scale_relu(conv_5x5_reduce, num_output=conv_output['conv_5x5'], kernel_size=5, pad=2)

    pool = L.Pooling(bottom,
                     kernel_size=3,
                     stride=1,
                     pad=1,
                     pool=P.Pooling.MAX)
    pool_proj, pool_proj_bn, pool_proj_scale, pool_proj_relu = \
        factorization_conv_bn_scale_relu(pool, num_output=conv_output['pool_proj'], kernel_size=1)

    concat = L.Concat(conv_1x1, conv_3x3, conv_5x5, pool_proj)

    return conv_1x1, conv_1x1_bn, conv_1x1_scale, conv_1x1_relu, conv_3x3_reduce, conv_3x3_reduce_bn, \
           conv_3x3_reduce_scale, conv_3x3_reduce_relu, conv_3x3, conv_3x3_bn, conv_3x3_scale, conv_3x3_relu, \
           conv_5x5_reduce, conv_5x5_reduce_bn, conv_5x5_reduce_scale, conv_5x5_reduce_relu, conv_5x5, conv_5x5_bn, \
           conv_5x5_scale, conv_5x5_relu, pool, pool_proj, pool_proj_bn, pool_proj_scale, pool_proj_relu, concat
Example #29
def joinBlock(name, opn, n, inputA, inputB, train=True):
    trainparam = []
    trainparam2 = []
    if train:
        trainparam = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=1, decay_mult=1)]
        trainparam2 = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=1, decay_mult=1), dict(lr_mult=1, decay_mult=1)]
    else:
        trainparam = [dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0)]
        trainparam2 = [dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0)]
    #if global_stat:
        #trainparam2 = [dict(lr_mult=0, decay_mult= 0),dict(lr_mult=0, decay_mult= 0),dict(lr_mult=0, decay_mult= 0)]
    #TRAINABLE???? TODO
    s = list(name)
    del s[0]
    if s[0] == 'B':
        del s[0]
    nname = "".join(s)
    if not upsample:
        convolution_param = dict(num_output=opn, kernel_size=2, stride=2, pad=0, weight_filler = dict(type='xavier'))
        n["upsample"+name] = L.Deconvolution(n[inputA], convolution_param=convolution_param, param=trainparam, name="upsampleparam"+nname) 
    else:
        convolution_param = dict(num_output=opn, kernel_size=2, stride=2, pad=0, weight_filler = dict(type='constant', value=0.0), bias_term=False)
        n["upsample"+name] = L.Deconvolution(n[inputA], convolution_param=convolution_param
                             ,param=[dict(lr_mult=0, decay_mult= 0)], name="upsampleparam"+nname)

    n["upsampleB"+name] = L.BatchNorm(n["upsample"+name], use_global_stats=global_stat, param=trainparam2, name="upsampleBparam"+nname)#, param=[{"lr_mult":0},{"lr_mult":0},{"lr_mult":0}])
    n[inputB] = L.BatchNorm(n[inputB], use_global_stats=global_stat, param=trainparam2, name=nname+"param")#, param=[{"lr_mult":0},{"lr_mult":0},{"lr_mult":0}])
    # NOTE: with BatchNorm, upsampleB must come directly below here!
    n["concat"+name] = L.Concat(n["upsampleB"+name], n[inputB], concat_param=dict(axis=1))
    return n, "concat"+name
Example #30
def inception_v3_7b(bottom, conv_output):
    conv_3x3_0, conv_3x3_0_bn, conv_3x3_0_relu = \
        conv_bn_relu(bottom, dict(kernel_size=3, num_output=conv_output['conv_3x3_0'], stride=2, pad=0, group=1,
                                  weight_type='xavier', weight_std=0.01, bias_type='constant', bias_value=0))
    conv_3x3_reduce, conv_3x3_reduce_bn, conv_3x3_reduce_relu = \
        conv_bn_relu(bottom, dict(kernel_size=1, num_output=conv_output['conv_3x3_reduce'], stride=1, pad=0, group=1,
                                  weight_type='xavier', weight_std=0.01, bias_type='constant', bias_value=0))
    conv_3x3_1, conv_3x3_1_bn, conv_3x3_1_relu = \
        conv_bn_relu(conv_3x3_reduce_bn, dict(kernel_size=3, num_output=conv_output['conv_3x3_1'], stride=1, pad=1,
                                              group=1, weight_type='xavier', weight_std=0.01, bias_type='constant',
                                              bias_value=0))
    conv_3x3_2, conv_3x3_2_bn, conv_3x3_2_relu = \
        conv_bn_relu(conv_3x3_1_bn, dict(kernel_size=3, num_output=conv_output['conv_3x3_2'], stride=2, pad=0, group=1,
                                         weight_type='xavier', weight_std=0.01, bias_type='constant', bias_value=0))
    pool = L.Pooling(bottom,
                     kernel_size=3,
                     stride=2,
                     pad=0,
                     pool=P.Pooling.MAX)
    # pool_proj, pool_proj_bn, pool_proj_relu = \
    #     conv_bn_relu(pool, dict(kernel_size=1, num_output=conv_output['pool_proj'], stride=1, pad=0, group=1,
    #                             weight_type='xavier', weight_std=0.01, bias_type='constant', bias_value=0))
    concat = L.Concat(conv_3x3_0_bn, conv_3x3_2_bn, pool)

    return conv_3x3_0, conv_3x3_0_bn, conv_3x3_0_relu, conv_3x3_reduce, conv_3x3_reduce_bn, conv_3x3_reduce_relu, \
           conv_3x3_1, conv_3x3_1_bn, conv_3x3_1_relu, conv_3x3_2, conv_3x3_2_bn, conv_3x3_2_relu, pool, concat