Example #1
def train_FNNSR(train_data=train_data_path, test_data=test_data_path,
                batch_size_train=batch_size_train, batch_size_test=batch_size_test,
                depth=depth, channel=channel, kernel=kernel):
    net = caffe.NetSpec()
    net.data, net.label = L.HDF5Data(hdf5_data_param={'source': train_data,
        'batch_size': batch_size_train}, include={'phase': caffe.TRAIN}, ntop=2)
    train_data_layer = str(net.to_proto())
    net.data, net.label = L.HDF5Data(hdf5_data_param={'source': test_data,
        'batch_size': batch_size_test}, include={'phase': caffe.TEST}, ntop=2)

    ## pointwise product
    net.layer_out = weight_smooth(net.data, kernel)
    for i in range(channel-1):
        net.model = weight_smooth(net.data, kernel)
        net.layer_out = L.Eltwise(net.layer_out, net.model)

    net.S = net.layer_out

    for j in range(depth-1):
        net.layer_in = net.layer_out
        ## pointwise product
        net.layer_out = weight_smooth(net.layer_in, kernel)
        for i in range(channel - 1):
            net.model = weight_smooth(net.layer_in, kernel)
            net.layer_out = L.Eltwise(net.layer_out, net.model)
        net.S = L.Eltwise(net.S, net.layer_out)

    net.P = weighting(net.S)
    net.loss = L.EuclideanLoss(net.P, net.label)

    return train_data_layer + str(net.to_proto())
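
A minimal usage sketch for this builder: write the generated prototxt string to disk and point a solver at it. The file names here are hypothetical and the solver prototxt is assumed to exist.

import caffe

with open('fnnsr_train_test.prototxt', 'w') as f:
    f.write(train_FNNSR())

solver = caffe.SGDSolver('fnnsr_solver.prototxt')  # its 'net:' field must reference the file written above
solver.solve()
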
Example #2
    def create_architecture(self, mode, hdf5_data):
        """Returns the architecture (i.e., caffe prototxt) of the model.

        Jer: One day this should probably be written to be more general.
        """

        arch = self.arch
        pars = self.pars
        n = caffe.NetSpec()

        if mode == 'deploy':
            n.data = L.DummyData(shape=[dict(dim=pars['deploy_dims'])])
        elif mode == 'train':
            n.data, n.label = L.HDF5Data(batch_size=pars['train_batch_size'], source=hdf5_data, ntop=pars['ntop'])
        else:  # Test.
            n.data, n.label = L.HDF5Data(batch_size=pars['test_batch_size'], source=hdf5_data, ntop=pars['ntop'])

        # print(n.to_proto())
        in_layer = n.data

        for layer in arch:
            layer_type, vals = layer

            if layer_type == 'e2e':
                in_layer = n.e2e = e2e_conv(in_layer, vals['n_filters'], vals['kernel_h'], vals['kernel_w'])
            elif layer_type == 'e2n':
                in_layer = n.e2n = e2n_conv(in_layer, vals['n_filters'], vals['kernel_h'], vals['kernel_w'])
            elif layer_type == 'fc':
                in_layer = n.fc = full_connect(in_layer, vals['n_filters'])
            elif layer_type == 'out':
                n.out = full_connect(in_layer, vals['n_filters'])
                # Rename to user specified unique layer name.
                # n.__setattr__('out', n.new_layer)

            elif layer_type == 'dropout':
                in_layer = n.dropout = L.Dropout(in_layer, in_place=True,
                                                 dropout_param=dict(dropout_ratio=vals['dropout_ratio']))
            elif layer_type == 'relu':
                in_layer = n.relu = L.ReLU(in_layer, in_place=True,
                                           relu_param=dict(negative_slope=vals['negative_slope']))
            else:
                raise ValueError('Unknown layer type: ' + str(layer_type))

        # ~ end for.

        if mode != 'deploy':
            if self.pars['loss'] == 'EuclideanLoss':
                n.loss = L.EuclideanLoss(n.out, n.label)
            else:
                raise ValueError("Only 'EuclideanLoss' currently implemented for pars['loss']!")

        return n
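
Unlike most examples here, this builder returns the NetSpec itself, so the caller serializes it. A minimal sketch, assuming an instance of the enclosing class (the instance name and file paths are hypothetical):

spec = builder.create_architecture('train', 'train_h5_list.txt')
with open('train.prototxt', 'w') as f:
    f.write(str(spec.to_proto()))
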
Example #3
def lenethdf5(hdf5, batch_size):
    # Net: a series of linear and simple nonlinear transformations
    n = caffe.NetSpec()

    n.data, n.label = L.HDF5Data(batch_size=batch_size, source=hdf5, ntop=2)

    # the base net
    n.conv1, n.relu1 = conv_relu(n.data, 32)
    n.pool1 = max_pool(n.relu1)

    n.conv2, n.relu2 = conv_relu(n.pool1, 64)
    n.pool2 = max_pool(n.relu2)
    n.conv3, n.relu3 = conv_relu(n.pool2, 128)
    n.pool3 = max_pool(n.relu3)

    # fully convolutional
    n.fc1, n.rlfc1 = conv_relu(n.pool3, 512, ks=3, pad=1)

    n.deconv5 = deconv(n.rlfc1, 128, pad=1)
    n.relu5, n.conv5 = relu_conv(n.deconv5, 128, pad=0)
    n.deconv6 = deconv(n.conv5, 64, pad=1)
    n.relu6, n.conv6 = relu_conv(n.deconv6, 64, pad=0)
    n.deconv7 = deconv(n.conv6, 32, pad=1)
    n.relu7, n.conv7 = relu_conv(n.deconv7, 32, pad=0)

    n.relu8, n.conv8 = relu_conv(n.conv7, 2, pad=0)

    n.accuracy = L.Accuracy(n.conv8, n.label)
    n.loss = L.SoftmaxWithLoss(n.conv8, n.label)

    return n.to_proto()
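
The conv_relu, max_pool, deconv, and relu_conv helpers are assumed to be defined elsewhere in the module, with "from caffe import layers as L, params as P" in scope. A minimal sketch of the first two, consistent with how they are called above (the filler choice is an assumption):

def conv_relu(bottom, nout, ks=5, stride=1, pad=2):
    conv = L.Convolution(bottom, kernel_size=ks, stride=stride, pad=pad,
                         num_output=nout, weight_filler=dict(type='xavier'))
    return conv, L.ReLU(conv, in_place=True)

def max_pool(bottom, ks=2, stride=2):
    return L.Pooling(bottom, pool=P.Pooling.MAX, kernel_size=ks, stride=stride)
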
Example #4
def modeltest(hdf5s, hdf5t, batch_size):
    # logistic regression: data, matrix multiplication, and 2-class softmax loss
    n = caffe.NetSpec()
    #n.data, n.lp_label, n.bag_label = L.HDF5Data(batch_size=batch_size, source=hdf5t, ntop=3)
    #n.data, n.lp_label, n.instance_label = L.HDF5Data(batch_size=batch_size, source=hdf5t, ntop=3)
    #n.data, n.lp_label = L.HDF5Data(batch_size=batch_size, source=hdf5t, ntop=2)
    n.data, n.lp_label, n.bag_label, n.instance_label = L.HDF5Data(batch_size=batch_size, source=hdf5t, ntop=4)
    n.dc_label = L.DummyData(data_filler=dict(type='constant', value=1), num=batch_size, channels=1, height=1, width=1)
    n.ip1 = L.InnerProduct(n.data, num_output=neuronL1, weight_filler=dict(type='xavier'))
    n.relu1 = L.Sigmoid(n.ip1, in_place=True)
    #n.dropout1 = L.Dropout(n.relu1, dropout_ratio=0.5)
    n.ip2 = L.InnerProduct(n.relu1, num_output=neuronL1-400, weight_filler=dict(type='xavier'))
    n.target_feature = L.Split(n.ip2)
    n.ip4 = L.InnerProduct(n.target_feature, num_output=1, weight_filler=dict(type='xavier'))
    #n.ip5 = L.Sigmoid(n.ip4, in_place=True)
    #n.real, n.ip3 = L.Python(n.source_feature, n.lp_label, n.bag_label, module='missSVM', layer='missSVMLayer', ntop=2)
    #n.ip3 = L.InnerProduct(n.source_feature, num_output=1, weight_filler=dict(type='xavier'))
    #n.accuracy = L.Accuracy(n.ip4, n.lp_label)
    #L.Silence(n.bag_label)
    #n.losslp = L.Python(n.ip4, n.lp_label, n.bag_label, module='GMloss', layer='MultipleInstanceLossLayer')
    #n.P, n.Y = L.Python(n.ip4, n.lp_label, n.bag_label, module='MIloss', layer='MultipleInstanceLossLayer', ntop=2)
    #n.losslp = L.SigmoidCrossEntropyLoss(n.P, n.Y)
    n.losslp = L.SigmoidCrossEntropyLoss(n.ip4, n.lp_label)
    #n.losstlp = L.SigmoidCrossEntropyLoss(n.ip4, n.lp_label)
    n.grl = L.GradientScaler(n.ip2, lower_bound=0.0)
    n.ip11 = L.InnerProduct(n.grl, num_output=300, weight_filler=dict(type='xavier'))
    n.relu11 = L.Sigmoid(n.ip11, in_place=True)
    n.dropout11 = L.Dropout(n.relu11, dropout_ratio=0.5)
    n.ip12 = L.InnerProduct(n.dropout11, num_output=1, weight_filler=dict(type='xavier'))
    n.lossdc = L.SigmoidCrossEntropyLoss(n.ip12, n.dc_label, loss_weight=0.1)
    return n.to_proto()
Example #5
def VS_net(train=True,
           learn_all=False,
           subset=None,
           deploy=False,
           numClassesVS=numClassesVS):
    if subset is None:
        subset = 'train' if train else 'test'
    #source = caffe_root + 'data/flickr_style/%s.txt' % subset
    #transform_param = dict(mirror=train, crop_size=227, mean_file=caffe_root + 'data/ilsvrc12/imagenet_mean.binaryproto')
    #style_data, style_label = L.ImageData(transform_param=transform_param, source=source,batch_size=50, new_height=256, new_width=256, ntop=2)

    if train:
        batch_size = 50
    else:
        batch_size = 1

    # HDF5 data source
    VS_data, VS_label = L.HDF5Data(source=hdf5DataFolderPath +
                                   'train_h5_list.txt',
                                   batch_size=batch_size,
                                   ntop=2)

    # test
    print(type(VS_label))
    return caffenet(data=VS_data,
                    label=VS_label,
                    train=train,
                    num_classes=numClassesVS,
                    classifier_name='fc8_VS',
                    learn_all=learn_all,
                    deploy=deploy)
Example #6
def style_net(train=True, learn_all=False, subset=None):
    if subset is None:
        subset = 'train' if train else 'test'
    source = caffe_root + 'data/flickr_style/%s.txt' % subset
    transform_param = dict(mirror=train,
                           crop_size=227,
                           mean_file=caffe_root +
                           'data/ilsvrc12/imagenet_mean.binaryproto')
    style_data, style_label = L.ImageData(transform_param=transform_param,
                                          source=source,
                                          batch_size=50,
                                          new_height=256,
                                          new_width=256,
                                          ntop=2)

    # HDF5 data source
    style_data, style_label = L.HDF5Data(source=hdf5DataFolderPath +
                                         'train_h5_list.txt',
                                         batch_size=50,
                                         ntop=2)

    # test
    print(type(style_label))
    return caffenet(data=style_data,
                    label=style_label,
                    train=train,
                    num_classes=NUM_STYLE_LABELS,
                    classifier_name='fc8_flickr',
                    learn_all=learn_all)
Example #7
def SRCNN3D(hdf5name, batch_size, kernel):
    n = caffe.NetSpec()
    n.data, n.label = L.HDF5Data(batch_size=batch_size,
                                 source=hdf5name,
                                 ntop=2,
                                 include=dict(phase=caffe.TRAIN))
    n.conv1 = L.Convolution(n.data, kernel_size=kernel[0], num_output=64, stride=1, pad=0,
                            param=[{'lr_mult': 1}, {'lr_mult': 0.1}],
                            weight_filler=dict(type='gaussian', std=0.001),
                            bias_filler=dict(type='constant', value=0),
                            engine=1)
    n.relu1 = L.ReLU(n.conv1,
                     in_place=True,
                     engine=1)
    n.conv2 = L.Convolution(n.conv1, kernel_size=kernel[1], num_output=32, stride=1, pad=0,
                            param=[{'lr_mult': 1}, {'lr_mult': 0.1}],
                            weight_filler=dict(type='gaussian', std=0.001),
                            bias_filler=dict(type='constant', value=0),
                            engine=1)
    n.relu2 = L.ReLU(n.conv2,
                     in_place=True,
                     engine=1)
    n.conv3 = L.Convolution(n.conv2, kernel_size=kernel[2], num_output=1, stride=1, pad=0,
                            param=[{'lr_mult': 1}, {'lr_mult': 0.1}],
                            weight_filler=dict(type='gaussian', std=0.001),
                            bias_filler=dict(type='constant', value=0),
                            engine=1)
    n.conv3_flat = L.Flatten(n.conv3)
    n.label_flat = L.Flatten(n.label)
    n.loss = L.EuclideanLoss(n.conv3_flat, n.label_flat)
    return n.to_proto()
Example #8
def ipcai(database, batch_size):
    # our version of LeNet: a series of linear and simple nonlinear transformations
    n = caffe.NetSpec()

    n.data, n.label = L.HDF5Data(batch_size=batch_size,
                                 source=database,
                                 ntop=2)

    n.fc1 = L.InnerProduct(n.data,
                           num_output=25,
                           weight_filler=dict(type='xavier'),
                           bias_filler=dict(type='constant', value=0.1))
    n.relu1 = L.ReLU(n.fc1, in_place=True)
    n.fc2 = L.InnerProduct(n.relu1,
                           num_output=25,
                           weight_filler=dict(type='xavier'),
                           bias_filler=dict(type='constant', value=0.1))
    n.relu2 = L.ReLU(n.fc2, in_place=True)
    n.score = L.InnerProduct(n.relu2,
                             num_output=1,
                             weight_filler=dict(type='xavier'),
                             bias_filler=dict(type='constant', value=0.1))
    n.loss = L.EuclideanLoss(n.score, n.label)

    return n.to_proto()
Example #9
def logreg(hdf5, batch_size):
    # logistic regression: data, matrix multiplication, and 2-class softmax loss
    n = caffe.NetSpec()
    n.data, n.label = L.HDF5Data(batch_size=batch_size, source=hdf5, ntop=2)
    n.ip1 = L.InnerProduct(n.data, num_output=2, weight_filler=dict(type='xavier'))
    n.accuracy = L.Accuracy(n.ip1, n.label)
    n.loss = L.SoftmaxWithLoss(n.ip1, n.label)
    return n.to_proto()
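
Note that the source argument of L.HDF5Data names a text file listing one HDF5 file per line, not an HDF5 file itself. A minimal sketch of preparing such an input for the net above (paths and shapes are illustrative):

import h5py
import numpy as np

with h5py.File('train.h5', 'w') as f:
    f.create_dataset('data', data=np.random.randn(100, 4).astype(np.float32))
    f.create_dataset('label', data=np.random.randint(0, 2, size=100).astype(np.float32))
with open('train_h5_list.txt', 'w') as f:
    f.write('train.h5\n')
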
Example #10
def net(hdf5, batch_size):
    n = caffe.NetSpec()
    n.data, n.label = L.HDF5Data(batch_size=batch_size, source=hdf5, ntop=2)
    n.ip1 = L.InnerProduct(n.data, num_output=50, weight_filler=dict(type='xavier'))
    n.relu1 = L.ReLU(n.ip1, in_place=True)
    n.ip2 = L.InnerProduct(n.relu1, num_output=4, weight_filler=dict(type='xavier'))
    n.loss = L.SigmoidCrossEntropyLoss(n.ip2, n.label)

    return n.to_proto()
Example #11
def model(h5path, batchsize):
    n = caffe.NetSpec()
    n.data, n.label = L.HDF5Data(source=h5path, batch_size=batchsize, ntop=2)
    n.ip1 = L.InnerProduct(n.data,
                           num_output=10,
                           weight_filler=dict(type='xavier'))
    n.accuracy = L.Accuracy(n.ip1, n.label)
    n.loss = L.SoftmaxWithLoss(n.ip1, n.label)
    return n.to_proto()
Example #12
def train_SRDenseNet(train_data=train_data_path, test_data=test_data_path,
                     batch_size_train=batch_size_train, batch_size_test=batch_size_test,
                     first_channel=first_channel, block=block, depth=depth, grow_rate=grow_rate,
                     bottleneck=bottleneck, dropout=dropout):
    net = caffe.NetSpec()
    net.data, net.label = L.HDF5Data(hdf5_data_param={
        'source': train_data, 'batch_size': batch_size_train}, include={'phase': caffe.TRAIN}, ntop=2)
    train_data_layer = str(net.to_proto())
    net.data, net.label = L.HDF5Data(hdf5_data_param={
        'source': test_data, 'batch_size': batch_size_test}, include={'phase': caffe.TEST}, ntop=2)

    net.model = conv_relu(net.data, channel=first_channel, kernel=3, stride=1, pad=1, dropout=dropout)

    num_channels = first_channel
    for i in range(block):
        net.dense = conv_relu(net.model, channel=grow_rate, kernel=3, stride=1, pad=1, dropout=dropout)
        for j in range(depth-1):
            net.dense = add_layer(net.dense, grow_rate, dropout)
        num_channels += grow_rate * depth
        net.model = L.Concat(net.model, net.dense, axis=1)

    net.bottleneck = conv_relu(net.model, channel=bottleneck, kernel=1, stride=1, pad=0, dropout=dropout)
    net.deconv1 = L.Deconvolution(net.bottleneck, convolution_param=dict(num_output=bottleneck,
                                                  kernel_size=4, stride=2, pad=1,
                                                  bias_term=False,
                                                  weight_filler=dict(type='msra'),
                                                  bias_filler=dict(type='constant')))
    net.deconv1 = L.ReLU(net.deconv1, in_place=True)

    net.deconv2 = L.Deconvolution(net.deconv1, convolution_param=dict(num_output=bottleneck,
                                               kernel_size=4, stride=2, pad=1,
                                               bias_term=False,
                                               weight_filler=dict(type='msra'),
                                               bias_filler=dict(type='constant')))
    net.deconv2 = L.ReLU(net.deconv2, in_place=True)

    net.reconstruct = L.Convolution(net.deconv2, num_output=1, kernel_size=3, stride=1,
                                    pad=1, bias_term=False, weight_filler=dict(type='msra'),
                                    bias_filler=dict(type='constant'))

    net.loss = L.EuclideanLoss(net.reconstruct, net.label)

    return train_data_layer + str(net.to_proto())
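
The two-pass serialization used here (and in Example #1) works because str(net.to_proto()) only emits the currently registered tops: the TRAIN data layer is captured first, reassigning net.data/net.label then swaps in the TEST-phase layer before the body is built, and concatenating the two strings yields a single prototxt containing both phase-specific data layers. A condensed, self-contained sketch of the same trick (list files hypothetical):

import caffe
from caffe import layers as L

net = caffe.NetSpec()
net.data, net.label = L.HDF5Data(hdf5_data_param={'source': 'train_h5_list.txt', 'batch_size': 64},
                                 include={'phase': caffe.TRAIN}, ntop=2)
train_head = str(net.to_proto())  # serializes just the TRAIN data layer
net.data, net.label = L.HDF5Data(hdf5_data_param={'source': 'test_h5_list.txt', 'batch_size': 16},
                                 include={'phase': caffe.TEST}, ntop=2)
net.ip = L.InnerProduct(net.data, num_output=1)  # stand-in for the real body
print(train_head + str(net.to_proto()))
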
Example #13
def logreg(hdf5, batch_size):
    n = caffe.NetSpec()  # create the network
    n.data, n.label = L.HDF5Data(batch_size=batch_size, source=hdf5,
                                 ntop=2)  # load the dataset
    n.ip1 = L.InnerProduct(n.data,
                           num_output=2,
                           weight_filler=dict(type='xavier'))  # input layer definition
    n.accuracy = L.Accuracy(n.ip1, n.label)  # accuracy metric
    n.loss = L.SoftmaxWithLoss(n.ip1, n.label)  # output layer
    return n.to_proto()
Example #14
def train_IFNNSR():
    net = caffe.NetSpec()
    net.data, net.label = L.HDF5Data(hdf5_data_param={
        'source': train_data_path,
        'batch_size': batch_size_train
    },
                                     include={'phase': caffe.TRAIN},
                                     ntop=2)
    train_data_layer = str(net.to_proto())
    net.data, net.label = L.HDF5Data(hdf5_data_param={
        'source': test_data_path,
        'batch_size': batch_size_test
    },
                                     include={'phase': caffe.TEST},
                                     ntop=2)

    net.model = net.data
    net.model = weight(net.model, share)  # element-wise product
    net.model = smooth(net.model, channel, group, kernel,
                       dilate)  # convolution
    net.model = L.TanH(net.model)  # tanh activation function
    net.model = smooth(net.model, channel, group, kernel,
                       dilate)  # convolution
    net.model = L.TanH(net.model)  # tanh activation function
    net.model = smooth(net.model, share, group, kernel, dilate)  # convolution
    net.model = L.TanH(net.model)  # tanh activation function
    net.sum = net.model

    for j in range(depth - 1):
        net.model = weight(net.model, share)
        net.model = smooth(net.model, channel, group, kernel, dilate)
        net.model = L.TanH(net.model)
        net.model = smooth(net.model, channel, group, kernel, dilate)
        net.model = L.TanH(net.model)
        net.model = smooth(net.model, share, group, kernel, dilate)
        net.model = L.TanH(net.model)
        net.sum = L.Eltwise(net.sum, net.model)

    net.predict = weight(net.sum, share)
    net.loss = L.WeightL2Loss(net.predict, net.label)
    # net.loss = L.EuclideanLoss(net.predict, net.label)

    return train_data_layer + str(net.to_proto())
Example #15
def gen_net(train_hdf5_in,
            train_batch_size,
            test_hdf5_in,
            test_batch_size,
            deploy=False):

    # Input Layers
    n = caffe.NetSpec()
    if deploy:
        n.data = L.DummyData(ntop=1, shape=[dict(dim=[1, 1, 20, 20, 20])])
    else:
        n.data, n.label = L.HDF5Data(
            ntop=2,
            include=dict(phase=caffe.TRAIN),
            hdf5_data_param=dict(batch_size=train_batch_size),
            source=train_hdf5_in)
        n.data2 = L.HDF5Data(ntop=0,
                             top=['data', 'label'],
                             include=dict(phase=caffe.TEST),
                             hdf5_data_param=dict(batch_size=test_batch_size),
                             source=test_hdf5_in)

    # Core Architecture
    n.deconv1 = Deconvolution(n.data)
    n.conv1, n.bn1, n.relu1 = Convolution_BN_ReLU(n.deconv1, num_output=64)
    n.conv2, n.bn2, n.relu2 = Convolution_BN_ReLU(n.relu1, num_output=64)
    n.conv3, n.bn3, n.relu3 = Convolution_BN_ReLU(n.relu2, num_output=32)
    n.conv4, n.bn4, n.relu4 = Convolution_BN_ReLU(n.relu3, num_output=16)
    n.conv5, n.bn5, n.relu5 = Convolution_BN_ReLU(n.relu4, num_output=16)
    n.conv6 = Convolution(n.relu5,
                          num_output=1,
                          param=[dict(lr_mult=0.1),
                                 dict(lr_mult=0.1)])
    n.recon = L.Eltwise(n.deconv1, n.conv6, operation=P.Eltwise.SUM)

    # Output Layers
    if not deploy:
        n.loss = L.EuclideanLoss(n.recon, n.label)
        #n.loss = L.Python (n.recon, n.label, python_param=dict(module='pyloss',layer='SmoothL1LossLayer_2'),loss_weight=1)

    # Return the network
    return n.to_proto()
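
A sketch of emitting both variants from this builder (list-file names hypothetical):

with open('train_test.prototxt', 'w') as f:
    f.write(str(gen_net('train_h5_list.txt', 64, 'test_h5_list.txt', 2)))
with open('deploy.prototxt', 'w') as f:
    f.write(str(gen_net('train_h5_list.txt', 64, 'test_h5_list.txt', 2, deploy=True)))

The ntop=0 / top=['data', 'label'] form on the second HDF5Data layer is what lets the TRAIN and TEST data layers coexist in one NetSpec: with ntop=0 nothing is auto-named, and the explicit top strings are written directly into the layer proto.
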
Example #16
def finetuningNet(h5, batch_size, layerNum):
    n = caffe.NetSpec()

    n.data, n.label = L.HDF5Data(source=h5,
                                 batch_size=batch_size,
                                 shuffle=True,
                                 ntop=2)
    flatdata = L.Flatten(n.data)
    flatdata_name = 'flatdata'
    n.__setattr__(flatdata_name, flatdata)

    param = learned_param
    for l in range(layerNum):
        if l == 0:
            encoder_name_last = flatdata_name
        else:
            encoder_name_last = relu_en_name

        encoder = L.InnerProduct(n[encoder_name_last],
                                 num_output=layerNeuronNum[l + 1],
                                 param=param,
                                 weight_filler=dict(type='gaussian',
                                                    std=0.005),
                                 bias_filler=dict(type='constant', value=0.1))
        encoder_name = 'encoder' + str(l + 1)
        n.__setattr__(encoder_name, encoder)

        relu_en = L.ReLU(n[encoder_name], in_place=True)
        relu_en_name = 'relu_en' + str(l + 1)
        n.__setattr__(relu_en_name, relu_en)

    for l in range(layerNum):
        if l == 0:
            decoder_name_last = relu_en_name
        else:
            decoder_name_last = relu_de_name

        decoder = L.InnerProduct(n[decoder_name_last],
                                 num_output=layerNeuronNum[layerNum - l - 1],
                                 param=param,
                                 weight_filler=dict(type='gaussian',
                                                    std=0.005),
                                 bias_filler=dict(type='constant', value=0.1))
        decoder_name = 'decoder' + str(layerNum - l)
        n.__setattr__(decoder_name, decoder)

        if l < (layerNum - 1):
            relu_de = L.ReLU(n[decoder_name], in_place=True)
            relu_de_name = 'relu_de' + str(layerNum - l)
            n.__setattr__(relu_de_name, relu_de)

    n.loss = L.EuclideanLoss(n[decoder_name], n.flatdata)

    return n.to_proto()
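
The n.__setattr__(name, ...) / n[name] pairing above is the standard NetSpec idiom for layer names computed at runtime. A condensed, self-contained sketch (layer widths illustrative):

import caffe
from caffe import layers as L

n = caffe.NetSpec()
n.data = L.DummyData(shape=[dict(dim=[1, 64])])
bottom = 'data'
for i, width in enumerate([32, 16]):
    name = 'encoder%d' % (i + 1)
    n.__setattr__(name, L.InnerProduct(n[bottom], num_output=width))
    bottom = name
print(n.to_proto())
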
Example #17
def nonlinear_net(hdf5, batch_size):
    # one small nonlinearity, one leap for model kind
    n = caffe.NetSpec()
    n.data, n.label = L.HDF5Data(batch_size=batch_size, source=hdf5, ntop=2)
    n.ip1 = L.InnerProduct(n.data,
                           num_output=40,
                           weight_filler=dict(type='xavier'))
    n.relu1 = L.ReLU(n.ip1, in_place=True)
    n.ip2 = L.InnerProduct(n.ip1,
                           num_output=2,
                           weight_filler=dict(type='xavier'))
    n.accuracy = L.Accuracy(n.ip2, n.label)
    n.loss = L.SoftmaxWithLoss(n.ip2, n.label)
    return n.to_proto()
Example #18
def nonlinear_net(hdf5, batch_size):
    # one small nonlinearity, one leap for model kind
    n = caffe.NetSpec()
    n.data, n.label = L.HDF5Data(batch_size=batch_size, source=hdf5, ntop=2)
    # define a hidden layer of dimension 40
    n.ip1 = L.InnerProduct(n.data, num_output=40, weight_filler=dict(type='xavier'))
    # transform the output through the ReLU (rectified linear) non-linearity
    n.relu1 = L.ReLU(n.ip1, in_place=True)
    # score the (now non-linear) features
    n.ip2 = L.InnerProduct(n.ip1, num_output=2, weight_filler=dict(type='xavier'))
    # same accuracy and loss as before
    n.accuracy = L.Accuracy(n.ip2, n.label)
    n.loss = L.SoftmaxWithLoss(n.ip2, n.label)
    return n.to_proto()
Example #19
def net(hdf5_list, batch_size):
    n = caffe.NetSpec()
    n.data, n.label = L.HDF5Data(batch_size=batch_size,
                                 source=hdf5_list,
                                 ntop=2)  # ntop: two param returns
    n.ip1 = L.InnerProduct(n.data,
                           num_output=50,
                           weight_filler=dict(type='xavier'))
    n.relu1 = L.ReLU(n.ip1, in_place=True)
    n.ip2 = L.InnerProduct(n.relu1,
                           num_output=2,
                           weight_filler=dict(type='xavier'))
    n.loss = L.SoftmaxWithLoss(n.ip2, n.label)
    n.accu = L.Accuracy(n.ip2, n.label)
    return n.to_proto()
Example #20
def createAutoencoder(hdf5, input_size, batch_size, phase):
    n = caffe.NetSpec()
    if phase == "inference":
        n.data = L.Input(input_param={'shape': {'dim': [1, input_size]}})
    else:
        n.data = L.HDF5Data(batch_size=batch_size, source=hdf5, ntop=1)
    n.ip1 = L.InnerProduct(n.data,
                           num_output=256,
                           weight_filler=dict(type='xavier'))
    n.bottleneck = L.Sigmoid(n.ip1, in_place=True)
    n.decode = L.InnerProduct(n.bottleneck,
                              num_output=input_size,
                              weight_filler=dict(type='xavier'))
    n.loss = L.EuclideanLoss(n.decode, n.data)

    return n.to_proto()
Example #21
    def lenet(self, hdf5, batch_size):
        '''
        Initialise the Basic LeNet layers

        Input Parameters:
        hdf5 : train hdf5 file
        batch_size : batch size for training the system

        Output Parameters:
        n.to_proto() : the network description as a NetParameter protobuf message

        '''
        n = caffe.NetSpec()
        n.data, n.label = L.HDF5Data(name="data",
                                     hdf5_data_param={
                                         'source': hdf5,
                                         'batch_size': batch_size
                                     },
                                     ntop=2,
                                     include={'phase': caffe.TRAIN})
        n.conv1 = L.Convolution(n.data,
                                kernel_size=5,
                                num_output=20,
                                weight_filler=dict(type='xavier'))
        n.pool1 = L.Pooling(n.conv1,
                            kernel_size=2,
                            stride=2,
                            pool=P.Pooling.MAX)
        n.conv2 = L.Convolution(n.pool1,
                                kernel_size=5,
                                num_output=50,
                                weight_filler=dict(type='xavier'))
        n.pool2 = L.Pooling(n.conv2,
                            kernel_size=2,
                            stride=2,
                            pool=P.Pooling.MAX)
        n.ip1 = L.InnerProduct(n.pool2,
                               num_output=500,
                               weight_filler=dict(type='xavier'))
        n.relu1 = L.ReLU(n.ip1, in_place=True)
        n.ip2 = L.InnerProduct(n.relu1,
                               num_output=10,
                               weight_filler=dict(type='xavier'))
        n.loss = L.SoftmaxWithLoss(n.ip2, n.label)
        n.accuracy = L.Accuracy(n.ip2, n.label, include={'phase': caffe.TEST})
        print(str(n.to_proto()))
        return n.to_proto()
Example #22
    def lenet_test(self, hdf5, batch_size):
        '''
        Declaring the validation phase for the network layer

        Input Parameters:
        hdf5 : val hdf5 file
        batch_size : batch size for validating the system
        '''
        n = caffe.NetSpec()
        n.data, n.label = L.HDF5Data(name="data",
                                     hdf5_data_param={
                                         'source': hdf5,
                                         'batch_size': batch_size
                                     },
                                     ntop=2,
                                     include={'phase': caffe.TEST})
        return n.to_proto()
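
A sketch of combining the two methods into a single train/val prototxt, assuming an instance of the enclosing class (instance and file names hypothetical). The TEST-phase data layer is written first so that, after phase filtering, each phase still sees its data layer before the net body:

with open('lenet_train_test.prototxt', 'w') as f:
    f.write(str(builder.lenet_test('val_h5_list.txt', 100)))  # TEST-phase data layer
    f.write(str(builder.lenet('train_h5_list.txt', 64)))      # TRAIN data layer plus the net body
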
Example #23
def classificationNet(h5, batch_size, layerNeuronNum, layerNum, classNum,
                      learned_param):
    n = caffe.NetSpec()

    n.data, n.label = L.HDF5Data(source=h5,
                                 batch_size=batch_size,
                                 shuffle=True,
                                 ntop=2)
    flatdata = L.Flatten(n.data)
    flatdata_name = 'flatdata'
    n.__setattr__(flatdata_name, flatdata)

    param = learned_param
    for l in range(layerNum):
        if l == 0:
            encoder_name_last = flatdata_name
        else:
            encoder_name_last = relu_en_name

        encoder = L.InnerProduct(n[encoder_name_last],
                                 num_output=layerNeuronNum[l + 1],
                                 param=param,
                                 weight_filler=dict(type='gaussian',
                                                    std=0.005),
                                 bias_filler=dict(type='constant', value=0.1))
        encoder_name = 'encoder' + str(l + 1)
        n.__setattr__(encoder_name, encoder)

        relu_en = L.ReLU(n[encoder_name], in_place=True)
        relu_en_name = 'relu_en' + str(l + 1)
        n.__setattr__(relu_en_name, relu_en)

    output = L.InnerProduct(n[relu_en_name],
                            num_output=classNum,
                            param=param,
                            weight_filler=dict(type='gaussian', std=0.005),
                            bias_filler=dict(type='constant', value=0.1))
    output_name = 'output'
    n.__setattr__(output_name, output)

    n.loss = L.SoftmaxWithLoss(n[output_name], n.label)

    return n.to_proto()
Example #24
def logreg(hdf5, batch_size):
    # read in the data
    n = caffe.NetSpec()
    n.data, n.label = L.HDF5Data(batch_size=batch_size, source=hdf5, ntop=2)
    # a bit of preprocessing - helpful!
    n.log = L.Log(n.data, base=-1, scale=1, shift=1)
    n.norm = L.BatchNorm(n.log, use_global_stats=False)
    n.scaled = L.Scale(n.norm, bias_term=True)
    # the actual regression - the core of what we want to do!
    n.dropout = L.Dropout(n.scaled, dropout_ratio=0.5)
    n.ip = L.InnerProduct(n.dropout,
                          num_output=nCategories,
                          weight_filler=dict(type='xavier'))
    # don't mess with these. They don't affect learning.
    n.prob = L.Softmax(n.ip)
    n.accuracy1 = L.Accuracy(n.prob, n.label)
    if nCategories > 5:
        n.accuracy5 = L.Accuracy(n.prob, n.label, top_k=5)
    n.loss = L.SoftmaxWithLoss(n.ip, n.label)
    return n.to_proto()
Example #25
def Bounding_Box_Reg(hdf5, batch_size):
    n = caffe.NetSpec()
    n.data, n.label = L.HDF5Data(batch_size=batch_size, source=hdf5, ntop=2)
    n.conv1 = L.Convolution(n.data,
                            kernel_size=5,
                            num_output=64,
                            pad=2,
                            weight_filler=dict(type='xavier'))
    n.relu1 = L.ReLU(n.conv1, in_place=True)
    n.pool1 = L.Pooling(n.conv1, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    n.conv2 = L.Convolution(n.pool1,
                            kernel_size=5,
                            num_output=128,
                            pad=2,
                            weight_filler=dict(type='xavier'))
    n.relu2 = L.ReLU(n.conv2, in_place=True)
    n.pool2 = L.Pooling(n.conv2, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    n.conv3 = L.Convolution(n.pool2,
                            kernel_size=3,
                            num_output=256,
                            pad=1,
                            weight_filler=dict(type='xavier'))
    n.relu3 = L.ReLU(n.conv3, in_place=True)
    n.pool3 = L.Pooling(n.conv3, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    n.conv4 = L.Convolution(n.pool3,
                            kernel_size=3,
                            num_output=512,
                            pad=1,
                            weight_filler=dict(type='xavier'))
    n.relu4 = L.ReLU(n.conv4, in_place=True)
    n.pool4 = L.Pooling(n.conv4, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    n.ip1 = L.InnerProduct(n.pool4,
                           num_output=4000,
                           weight_filler=dict(type='xavier'))
    n.dp1 = L.Dropout(n.ip1, dropout_ratio=0.5, in_place=True)
    n.ip2 = L.InnerProduct(n.ip1,
                           num_output=4,
                           weight_filler=dict(type='xavier'))
    n.loss = L.EuclideanLoss(n.ip2, n.label)

    return n.to_proto()
Example #26
def InterSRReCNN3D_net(hdf5name, batch_size, layers, kernel, numkernels, padding, residual=True):
    n = caffe.NetSpec()
    n.data, n.label = L.HDF5Data(batch_size=batch_size,
                                 source=hdf5name,
                                 ntop=2,
                                 include=dict(phase=caffe.TRAIN))
    n.conv1 = L.Convolution(n.data, kernel_size=kernel, num_output=numkernels, stride=1, pad=padding,
                            param=[{'lr_mult': 1}, {'lr_mult': 0.1}],
                            weight_filler=dict(type='gaussian', std=np.sqrt(2 / float(2 * kernel**3))),
                            bias_filler=dict(type='constant', value=0),
                            engine=1)
    n.relu1 = L.ReLU(n.conv1,
                     in_place=True,
                     engine=1)
    for idx in range(2, layers):
        n['conv' + str(idx)] = L.Convolution(n['conv' + str(idx - 1)], kernel_size=kernel,
                                             num_output=numkernels, stride=1, pad=padding,
                                             param=[{'lr_mult': 1}, {'lr_mult': 0.1}],
                                             weight_filler=dict(type='gaussian', std=np.sqrt(2 / float(numkernels * kernel**3))),
                                             bias_filler=dict(type='constant', value=0),
                                             engine=1)
        n['relu' + str(idx)] = L.ReLU(n['conv' + str(idx)],
                                      in_place=True,
                                      engine=1)
    n['conv' + str(layers)] = L.Convolution(n['conv' + str(layers - 1)], kernel_size=kernel,
                                            num_output=1, stride=1, pad=padding,
                                            param=[{'lr_mult': 1}, {'lr_mult': 0.1}],
                                            weight_filler=dict(type='gaussian', std=np.sqrt(2 / float(numkernels * kernel**3))),
                                            bias_filler=dict(type='constant', value=0),
                                            engine=1)

    if residual:
        n.out = L.Eltwise(n['conv' + str(layers)], n.data,
                          operation=1)
        n.out_flat = L.Flatten(n.out)
    else:
        n.out_flat = L.Flatten(n['conv' + str(layers)])
    n.label_flat = L.Flatten(n.label)
    n.loss = L.EuclideanLoss(n.out_flat, n.label_flat)
    return n.to_proto()
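
In the residual branch above, operation=1 is the raw enum value for elementwise summation (P.Eltwise.SUM). A small self-contained sketch of the same residual wiring using the named enum:

import caffe
from caffe import layers as L, params as P

n = caffe.NetSpec()
n.data = L.DummyData(shape=[dict(dim=[1, 1, 8, 8, 8])])
n.conv = L.Convolution(n.data, kernel_size=3, pad=1, num_output=1)
n.out = L.Eltwise(n.conv, n.data, operation=P.Eltwise.SUM)  # equivalent to operation=1
print(n.to_proto())
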
Example #27
def MLP(hdf5, batch_size, sample_length, phase):
    n = caffe.NetSpec()
    if phase != 'inference':
        n.data, n.label = L.HDF5Data(batch_size=batch_size,
                                     source=hdf5,
                                     ntop=2)
    else:
        n.data = L.Input(input_param={'shape': {'dim': [1, sample_length]}})
    #n.data, n.label = L.HDF5Data(batch_size=batch_size, source=hdf5, ntop=2)
    n.ip1 = L.InnerProduct(n.data,
                           num_output=512,
                           weight_filler=dict(type='xavier'))
    n.drop1 = L.Dropout(n.ip1, dropout_ratio=0.5, in_place=True)
    n.sig1 = L.ReLU(n.drop1, in_place=True)
    n.ip2 = L.InnerProduct(n.sig1,
                           num_output=256,
                           weight_filler=dict(type='xavier'))
    n.drop2 = L.Dropout(n.ip2, dropout_ratio=0.5, in_place=True)
    n.sig2 = L.ReLU(n.drop2, in_place=True)
    n.ip3 = L.InnerProduct(n.sig2,
                           num_output=128,
                           weight_filler=dict(type='xavier'))
    n.drop3 = L.Dropout(n.ip3, dropout_ratio=0.5, in_place=True)
    n.sig3 = L.ReLU(n.drop3, in_place=True)
    n.ip4 = L.InnerProduct(n.sig3,
                           num_output=2,
                           weight_filler=dict(type='xavier'))
    if phase == 'train':
        n.loss = L.SoftmaxWithLoss(n.ip4, n.label)
        n.prob = L.Softmax(n.ip4)
        n.accuracy = L.Accuracy(n.prob, n.label)
    elif phase == 'test':
        n.prob = L.Softmax(n.ip4)
        #n.accuracy = L.Accuracy(n.prob, n.label)
    elif phase == 'inference':
        n.prob = L.Softmax(n.ip4)

    return n.to_proto()
Example #28
def model(h5path, batchsize):
    n = caffe.NetSpec()
    n.data, n.label = L.HDF5Data(source=h5path, batch_size=batchsize, ntop=2)
    n.dataim = L.Reshape(n.data, shape={'dim': [batchsize, 1, 28, 28]})
    n.conv1 = L.Convolution(n.dataim,
                            kernel_size=5,
                            num_output=20,
                            weight_filler=dict(type='xavier'))
    n.pool1 = L.Pooling(n.conv1, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    n.conv2 = L.Convolution(n.pool1,
                            kernel_size=5,
                            num_output=50,
                            weight_filler=dict(type='xavier'))
    n.pool2 = L.Pooling(n.conv2, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    n.fc1 = L.InnerProduct(n.pool2,
                           num_output=500,
                           weight_filler=dict(type='xavier'))
    n.relu1 = L.ReLU(n.fc1, in_place=True)
    n.score = L.InnerProduct(n.relu1,
                             num_output=10,
                             weight_filler=dict(type='xavier'))
    n.accuracy = L.Accuracy(n.score, n.label)
    n.loss = L.SoftmaxWithLoss(n.score, n.label)
    return n.to_proto()
Example #29
def gen_net(split, batch_size, blk_size, subrate, source_path=''):
    ''' Define network '''
    n = caffe.NetSpec()
    #n.name = net_type;

    # ====================== Step 1. Define the input layer ==========================================
    if net_type == 'ReconNet' or net_type == 'DR2_Stage1' or net_type == 'DR2_Stage2':
        noMeas = int(round(subrate * blk_size * blk_size))
        if split == 'train':
            n.data, n.label = L.HDF5Data(name="data",
                                         batch_size=batch_size,
                                         source=source_path,
                                         ntop=2,
                                         include={'phase': caffe.TRAIN},
                                         type="HDF5Data")
        elif split == 'test':
            n.data, n.label = L.HDF5Data(name="data",
                                         batch_size=2,
                                         source=source_path,
                                         ntop=2,
                                         include={'phase': caffe.TEST},
                                         type="HDF5Data")
        else:
            n.data = L.Input(name="data",
                             ntop=1,
                             input_param={'shape': {
                                 'dim': [1, noMeas, 1, 1]
                             }})

    elif net_type == 'CSNet':
        if split == 'train_val':
            n.data, n.label = L.HDF5Data(name="data",
                                         batch_size=batch_size,
                                         source=source_path,
                                         ntop=2,
                                         include={'phase': caffe.TRAIN},
                                         type="HDF5Data")
            # Re-assigning n.data/n.label here would drop the TRAIN-phase layer
            # from the serialized net, so give the TEST-phase layer explicit
            # tops instead (the same ntop=0 trick as in Example #15).
            n.data_test = L.HDF5Data(name="data",
                                     batch_size=2,
                                     source=source_path,
                                     ntop=0,
                                     top=['data', 'label'],
                                     include={'phase': caffe.TEST},
                                     type="HDF5Data")
        else:
            n.data = L.Input(
                name="data",
                ntop=1,
                input_param={'shape': {
                    'dim': [1, 1, blk_size, blk_size]
                }})

    # ======================= Stage 2: Define the main network =======================================
    if net_type == 'ReconNet' or net_type == 'DR2_Stage1' or net_type == 'DR2_Stage2':

        if subrate == 0.01:
            adap_std = 0.01
        elif subrate == 0.04:
            adap_std = 0.03
        elif subrate == 0.25:
            adap_std = 0.05
        else:
            adap_std = 0.05

        n.fc1 = L.Convolution(n.data,
                              kernel_size=1,
                              stride=1,
                              num_output=blk_size * blk_size,
                              pad=0,
                              weight_filler=dict(type='gaussian',
                                                 std=adap_std),
                              bias_filler=dict(type='constant', value=0),
                              param=[
                                  dict(lr_mult=1, decay_mult=1),
                                  dict(lr_mult=2, decay_mult=0)
                              ])
        n.reshape = L.Reshape(
            n.fc1,
            reshape_param={'shape': {
                'dim': [0, 1, blk_size, blk_size]
            }})

        if net_type == 'ReconNet':
            n.conv1, n.relu1 = conv_relu(n.reshape, 64, 11, 1, 5)
            n.conv2, n.relu2 = conv_relu(n.relu1, 32, 1, 1, 0)
            n.conv3, n.relu3 = conv_relu(n.relu2, 1, 7, 1, 3, 'gaussian', 0.1)
            n.conv4, n.relu4 = conv_relu(n.relu3, 64, 11, 1, 5)
            n.conv5, n.relu5 = conv_relu(n.relu4, 32, 1, 1, 0)
            n.conv6 = convLayer(n.relu5, 1, 7, 1, 3, 'gaussian', 0.1)

            if (split == 'train' or split == 'test'):
                n.loss = L.EuclideanLoss(n.conv6, n.label)

        elif net_type == 'DR2_Stage1':
            if (split == 'train' or split == 'test'):
                n.loss = L.EuclideanLoss(n.reshape, n.label, loss_weight=1)

        elif net_type == 'DR2_Stage2':
            # 1st residual subnet
            n.conv1r, n.bnorm1, n.scale1, n.relu1 = conv_bn_relu(
                n.reshape, 64, 11, 1, 5)
            n.conv2r, n.bnorm2, n.scale2, n.relu2 = conv_bn_relu(
                n.relu1, 32, 1, 1, 0)
            n.conv3r = L.Convolution(n.relu2,
                                     kernel_size=7,
                                     stride=1,
                                     num_output=1,
                                     pad=3,
                                     weight_filler=dict(type='gaussian',
                                                        std=0.001),
                                     bias_term=False,
                                     param=[dict(lr_mult=0.1)])
            n.res1 = L.Eltwise(n.reshape, n.conv3r)

            # 2nd Residual subnet
            n.conv4r, n.bnorm4, n.scale4, n.relu4 = conv_bn_relu(
                n.res1, 64, 11, 1, 5)
            n.conv5r, n.bnorm5, n.scale5, n.relu5 = conv_bn_relu(
                n.relu4, 32, 1, 1, 0)
            n.conv6r = L.Convolution(n.relu5,
                                     kernel_size=7,
                                     stride=1,
                                     num_output=1,
                                     pad=3,
                                     weight_filler=dict(type='gaussian',
                                                        std=0.001),
                                     bias_term=False,
                                     param=[dict(lr_mult=0.1)])
            n.res2 = L.Eltwise(n.res1, n.conv6r)

            # 3rd Residual subnet
            n.conv7r, n.bnorm7, n.scale7, n.relu7 = conv_bn_relu(
                n.res2, 64, 11, 1, 5)
            n.conv8r, n.bnorm8, n.scale8, n.relu8 = conv_bn_relu(
                n.relu7, 32, 1, 1, 0)
            n.conv9r = L.Convolution(n.relu8,
                                     kernel_size=7,
                                     stride=1,
                                     num_output=1,
                                     pad=3,
                                     weight_filler=dict(type='gaussian',
                                                        std=0.001),
                                     bias_term=False,
                                     param=[dict(lr_mult=0.1)])
            n.res3 = L.Eltwise(n.res2, n.conv9r)

            # 4th Residual subnet
            n.conv10r, n.bnorm10, n.scale10, n.relu10 = conv_bn_relu(
                n.res3, 64, 11, 1, 5)
            n.conv11r, n.bnorm11, n.scale11, n.relu11 = conv_bn_relu(
                n.relu10, 32, 1, 1, 0)
            n.conv12r = L.Convolution(n.relu11,
                                      kernel_size=7,
                                      stride=1,
                                      num_output=1,
                                      pad=3,
                                      weight_filler=dict(type='gaussian',
                                                         std=0.001),
                                      bias_term=False,
                                      param=[dict(lr_mult=0.1)])
            n.res4 = L.Eltwise(n.res3, n.conv12r)

            # Loss layer
            if (split == 'train' or split == 'test'):
                n.loss = L.EuclideanLoss(n.res4, n.label, loss_weight=1)
                n.loss2 = L.EuclideanLoss(n.reshape, n.label, loss_weight=0)

        return n.to_proto()
Example #30
def setLayers_twoBranches(data_source,
                          batch_size,
                          layername,
                          kernel,
                          stride,
                          outCH,
                          label_name,
                          transform_param_in,
                          deploy=False,
                          batchnorm=0,
                          lr_mult_distro=[1, 1, 1]):
    # It is tricky to produce the deploy prototxt file, as the data input is not from a layer, so we have to create a workaround.
    # Producing the training and testing prototxt files is pretty straightforward.
    n = caffe.NetSpec()
    assert len(layername) == len(kernel)
    assert len(layername) == len(stride)
    assert len(layername) == len(outCH)
    num_parts = transform_param_in['num_parts']

    if deploy == False and "lmdb" not in data_source:
        if (len(label_name) == 1):
            n.data, n.tops[label_name[0]] = L.HDF5Data(hdf5_data_param=dict(
                batch_size=batch_size, source=data_source),
                                                       ntop=2)
        elif (len(label_name) == 2):
            n.data, n.tops[label_name[0]], n.tops[label_name[1]] = L.HDF5Data(
                hdf5_data_param=dict(batch_size=batch_size,
                                     source=data_source),
                ntop=3)
    # produce data definition for an LMDB-backed (CPMData) training net
    elif deploy == False:
        n.data, n.tops['label'] = L.CPMData(
            data_param=dict(backend=1,
                            source=data_source,
                            batch_size=batch_size),
            cpm_transform_param=transform_param_in,
            ntop=2)
        n.tops[label_name[2]], n.tops[label_name[3]], n.tops[
            label_name[4]], n.tops[label_name[5]] = L.Slice(
                n.label,
                slice_param=dict(
                    axis=1, slice_point=[38, num_parts + 1, num_parts + 39]),
                ntop=4)
        n.tops[label_name[0]] = L.Eltwise(n.tops[label_name[2]],
                                          n.tops[label_name[4]],
                                          operation=P.Eltwise.PROD)
        n.tops[label_name[1]] = L.Eltwise(n.tops[label_name[3]],
                                          n.tops[label_name[5]],
                                          operation=P.Eltwise.PROD)

    else:
        input = "data"
        dim1 = 1
        dim2 = 4
        dim3 = 368
        dim4 = 368
        # make an empty "data" layer so the next layer accepting input will be able to take the correct blob name "data",
        # we will later have to remove this layer from the serialization string, since this is just a placeholder
        n.data = L.Layer()

    # something special before everything
    n.image, n.center_map = L.Slice(n.data,
                                    slice_param=dict(axis=1, slice_point=3),
                                    ntop=2)
    n.silence2 = L.Silence(n.center_map, ntop=0)
    #n.pool_center_lower = L.Pooling(n.center_map, kernel_size=9, stride=8, pool=P.Pooling.AVE)

    # just follow arrays..CPCPCPCPCCCC....
    last_layer = ['image', 'image']
    stage = 1
    conv_counter = 1
    pool_counter = 1
    drop_counter = 1
    local_counter = 1
    state = 'image'  # can be image or fuse
    share_point = 0

    for l in range(0, len(layername)):
        if layername[l] == 'V':  #pretrained VGG layers
            conv_name = 'conv%d_%d' % (pool_counter, local_counter)
            lr_m = lr_mult_distro[0]
            n.tops[conv_name] = L.Convolution(
                n.tops[last_layer[0]],
                kernel_size=kernel[l],
                num_output=outCH[l],
                pad=int(math.floor(kernel[l] / 2)),
                param=[
                    dict(lr_mult=lr_m, decay_mult=1),
                    dict(lr_mult=lr_m * 2, decay_mult=0)
                ],
                weight_filler=dict(type='gaussian', std=0.01),
                bias_filler=dict(type='constant'))
            last_layer[0] = conv_name
            last_layer[1] = conv_name
            print('%s\tch=%d\t%.1f' % (last_layer[0], outCH[l], lr_m))
            ReLUname = 'relu%d_%d' % (pool_counter, local_counter)
            n.tops[ReLUname] = L.ReLU(n.tops[last_layer[0]], in_place=True)
            local_counter += 1
            print(ReLUname)
        if layername[l] == 'B':
            pool_counter += 1
            local_counter = 1
        if layername[l] == 'C':
            if state == 'image':
                #conv_name = 'conv%d_stage%d' % (conv_counter, stage)
                conv_name = 'conv%d_%d_CPM' % (
                    pool_counter, local_counter
                )  # no image state in subsequent stages
                lr_m = lr_mult_distro[1]
            else:  # fuse
                conv_name = 'Mconv%d_stage%d' % (conv_counter, stage)
                lr_m = lr_mult_distro[2]
                conv_counter += 1
            #if stage == 1:
            #    lr_m = 1
            #else:
            #    lr_m = lr_sub
            n.tops[conv_name] = L.Convolution(
                n.tops[last_layer[0]],
                kernel_size=kernel[l],
                num_output=outCH[l],
                pad=int(math.floor(kernel[l] / 2)),
                param=[
                    dict(lr_mult=lr_m, decay_mult=1),
                    dict(lr_mult=lr_m * 2, decay_mult=0)
                ],
                weight_filler=dict(type='gaussian', std=0.01),
                bias_filler=dict(type='constant'))
            last_layer[0] = conv_name
            last_layer[1] = conv_name
            print('%s\tch=%d\t%.1f' % (last_layer[0], outCH[l], lr_m))

            if layername[l + 1] != 'L':
                if (state == 'image'):
                    if (batchnorm == 1):
                        batchnorm_name = 'bn%d_stage%d' % (conv_counter, stage)
                        n.tops[batchnorm_name] = L.BatchNorm(
                            n.tops[last_layer[0]],
                            param=[
                                dict(lr_mult=0),
                                dict(lr_mult=0),
                                dict(lr_mult=0)
                            ])
                        #scale_filler=dict(type='constant', value=1), shift_filler=dict(type='constant', value=0.001))
                        last_layer[0] = batchnorm_name
                    #ReLUname = 'relu%d_stage%d' % (conv_counter, stage)
                    ReLUname = 'relu%d_%d_CPM' % (pool_counter, local_counter)
                    n.tops[ReLUname] = L.ReLU(n.tops[last_layer[0]],
                                              in_place=True)
                else:
                    if (batchnorm == 1):
                        batchnorm_name = 'Mbn%d_stage%d' % (conv_counter,
                                                            stage)
                        n.tops[batchnorm_name] = L.BatchNorm(
                            n.tops[last_layer[0]],
                            param=[
                                dict(lr_mult=0),
                                dict(lr_mult=0),
                                dict(lr_mult=0)
                            ])
                        #scale_filler=dict(type='constant', value=1), shift_filler=dict(type='constant', value=0.001))
                        last_layer[0] = batchnorm_name
                    ReLUname = 'Mrelu%d_stage%d' % (conv_counter, stage)
                    n.tops[ReLUname] = L.ReLU(n.tops[last_layer[0]],
                                              in_place=True)
                #last_layer = ReLUname
                print(ReLUname)

            #conv_counter += 1
            local_counter += 1

        elif layername[l] == 'C2':
            for level in range(0, 2):
                if state == 'image':
                    #conv_name = 'conv%d_stage%d' % (conv_counter, stage)
                    conv_name = 'conv%d_%d_CPM_L%d' % (
                        pool_counter, local_counter, level + 1
                    )  # no image state in subsequent stages
                    lr_m = lr_mult_distro[1]
                else:  # fuse
                    conv_name = 'Mconv%d_stage%d_L%d' % (conv_counter, stage,
                                                         level + 1)
                    lr_m = lr_mult_distro[2]
                    #conv_counter += 1
                #if stage == 1:
                #    lr_m = 1
                #else:
                #    lr_m = lr_sub
                if layername[l + 1] == 'L2' or layername[l + 1] == 'L3':
                    if level == 0:
                        outCH[l] = 38
                    else:
                        outCH[l] = 19

                n.tops[conv_name] = L.Convolution(
                    n.tops[last_layer[level]],
                    kernel_size=kernel[l],
                    num_output=outCH[l],
                    pad=int(math.floor(kernel[l] / 2)),
                    param=[
                        dict(lr_mult=lr_m, decay_mult=1),
                        dict(lr_mult=lr_m * 2, decay_mult=0)
                    ],
                    weight_filler=dict(type='gaussian', std=0.01),
                    bias_filler=dict(type='constant'))
                last_layer[level] = conv_name
                print('%s\tch=%d\t%.1f' % (last_layer[level], outCH[l], lr_m))

                if layername[l + 1] != 'L2' and layername[l + 1] != 'L3':
                    if (state == 'image'):
                        if (batchnorm == 1):
                            batchnorm_name = 'bn%d_stage%d_L%d' % (
                                conv_counter, stage, level + 1)
                            n.tops[batchnorm_name] = L.BatchNorm(
                                n.tops[last_layer[level]],
                                param=[
                                    dict(lr_mult=0),
                                    dict(lr_mult=0),
                                    dict(lr_mult=0)
                                ])
                            #scale_filler=dict(type='constant', value=1), shift_filler=dict(type='constant', value=0.001))
                            last_layer[level] = batchnorm_name
                        #ReLUname = 'relu%d_stage%d' % (conv_counter, stage)
                        ReLUname = 'relu%d_%d_CPM_L%d' % (
                            pool_counter, local_counter, level + 1)
                        n.tops[ReLUname] = L.ReLU(n.tops[last_layer[level]],
                                                  in_place=True)
                    else:
                        if (batchnorm == 1):
                            batchnorm_name = 'Mbn%d_stage%d_L%d' % (
                                conv_counter, stage, level + 1)
                            n.tops[batchnorm_name] = L.BatchNorm(
                                n.tops[last_layer[level]],
                                param=[
                                    dict(lr_mult=0),
                                    dict(lr_mult=0),
                                    dict(lr_mult=0)
                                ])
                            #scale_filler=dict(type='constant', value=1), shift_filler=dict(type='constant', value=0.001))
                            last_layer[level] = batchnorm_name
                        ReLUname = 'Mrelu%d_stage%d_L%d' % (conv_counter,
                                                            stage, level + 1)
                        n.tops[ReLUname] = L.ReLU(n.tops[last_layer[level]],
                                                  in_place=True)
                    print(ReLUname)

            conv_counter += 1
            local_counter += 1

        elif layername[l] == 'P':  # Pooling
            n.tops['pool%d_stage%d' % (pool_counter, stage)] = L.Pooling(
                n.tops[last_layer[0]],
                kernel_size=kernel[l],
                stride=stride[l],
                pool=P.Pooling.MAX)
            last_layer[0] = 'pool%d_stage%d' % (pool_counter, stage)
            pool_counter += 1
            local_counter = 1
            conv_counter += 1
            print(last_layer[0])

        elif layername[l] == 'L':
            # Loss: n.loss layer is only in training and testing nets, but not in deploy net.
            if deploy == False and "lmdb" not in data_source:
                n.tops['map_vec_stage%d' % stage] = L.Flatten(
                    n.tops[last_layer[0]])
                n.tops['loss_stage%d' % stage] = L.EuclideanLoss(
                    n.tops['map_vec_stage%d' % stage], n.tops[label_name[1]])
            elif deploy == False:
                level = 1
                name = 'weight_stage%d' % stage
                n.tops[name] = L.Eltwise(n.tops[last_layer[level]],
                                         n.tops[label_name[(level + 2)]],
                                         operation=P.Eltwise.PROD)
                n.tops['loss_stage%d' % stage] = L.EuclideanLoss(
                    n.tops[name], n.tops[label_name[level]])

            print('loss %d' % stage)
            stage += 1
            conv_counter = 1
            pool_counter = 1
            drop_counter = 1
            local_counter = 1
            state = 'image'

        elif layername[l] == 'L2':
            # Loss: n.loss layer is only in training and testing nets, but not in deploy net.
            weight = [lr_mult_distro[3], 1]
            # print lr_mult_distro[3]
            for level in range(0, 2):
                if deploy == False and "lmdb" not in data_source:
                    n.tops['map_vec_stage%d_L%d' %
                           (stage, level + 1)] = L.Flatten(
                               n.tops[last_layer[level]])
                    n.tops['loss_stage%d_L%d' %
                           (stage, level + 1)] = L.EuclideanLoss(
                               n.tops['map_vec_stage%d_L%d' % (stage, level + 1)],
                               n.tops[label_name[level]],
                               loss_weight=weight[level])
                elif deploy == False:
                    name = 'weight_stage%d_L%d' % (stage, level + 1)
                    n.tops[name] = L.Eltwise(n.tops[last_layer[level]],
                                             n.tops[label_name[(level + 2)]],
                                             operation=P.Eltwise.PROD)
                    n.tops['loss_stage%d_L%d' %
                           (stage, level + 1)] = L.EuclideanLoss(
                               n.tops[name],
                               n.tops[label_name[level]],
                               loss_weight=weight[level])

                print('loss %d level %d' % (stage, level + 1))

            stage += 1
            #last_connect = last_layer
            #last_layer = 'image'
            conv_counter = 1
            pool_counter = 1
            drop_counter = 1
            local_counter = 1
            state = 'image'

        elif layername[l] == 'L3':
            # Loss: n.loss layer is only in training and testing nets, but not in deploy net.
            weight = [lr_mult_distro[3], 1]
            # print lr_mult_distro[3]
            if deploy == False:
                level = 0
                n.tops['loss_stage%d_L%d' %
                       (stage, level + 1)] = L.Euclidean2Loss(
                           n.tops[last_layer[level]],
                           n.tops[label_name[level]],
                           n.tops[label_name[2]],
                           loss_weight=weight[level])
                print('loss %d level %d' % (stage, level + 1))
                level = 1
                n.tops['loss_stage%d_L%d' %
                       (stage, level + 1)] = L.EuclideanLoss(
                           n.tops[last_layer[level]],
                           n.tops[label_name[level]],
                           loss_weight=weight[level])
                print('loss %d level %d' % (stage, level + 1))

            stage += 1
            #last_connect = last_layer
            #last_layer = 'image'
            conv_counter = 1
            pool_counter = 1
            drop_counter = 1
            local_counter = 1
            state = 'image'

        elif layername[l] == 'D':
            if deploy == False:
                n.tops['drop%d_stage%d' % (drop_counter, stage)] = L.Dropout(
                    n.tops[last_layer[0]],
                    in_place=True,
                    dropout_param=dict(dropout_ratio=0.5))
                drop_counter += 1
        elif layername[l] == '@':
            #if not share_point:
            #    share_point = last_layer
            n.tops['concat_stage%d' % stage] = L.Concat(
                n.tops[last_layer[0]],
                n.tops[last_layer[1]],
                n.tops[share_point],
                concat_param=dict(axis=1))

            local_counter = 1
            state = 'fuse'
            last_layer[0] = 'concat_stage%d' % stage
            last_layer[1] = 'concat_stage%d' % stage
            print(last_layer)
        elif layername[l] == '$':
            share_point = last_layer[0]
            pool_counter += 1
            local_counter = 1
            print('share')

    # final process
    stage -= 1
    #if stage == 1:
    #    n.silence = L.Silence(n.pool_center_lower, ntop=0)

    if deploy == False:
        return str(n.to_proto())
        # for generating the deploy net
    else:
        # generate the input information header string
        deploy_str = 'input: {}\ninput_dim: {}\ninput_dim: {}\ninput_dim: {}\ninput_dim: {}'.format(
            '"' + input + '"', dim1, dim2, dim3, dim4)
        # assemble the input header with the net layers string.  remove the first placeholder layer from the net string.
        return deploy_str + '\n' + 'layer {' + 'layer {'.join(
            str(n.to_proto()).split('layer {')[2:])
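
For the default dimensions above, the hand-assembled deploy header that replaces the placeholder data layer comes out as:

input: "data"
input_dim: 1
input_dim: 4
input_dim: 368
input_dim: 368

and the split on 'layer {' drops the serialized placeholder L.Layer() before the remaining layers are appended.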