Code Example #1
'''
Transforming a network definition from mxnet.json to caffe.prototxt
'''
import sys
import json
sys.path.insert(0, '/home/huangxuankun/caffe/caffe/python')
import caffe
from caffe import layers as L
from caffe import params as P

# model_def: Inception-7-symbol.json
model_def = sys.argv[1]

N = caffe.NetSpec()
N.data, N.label = L.Data(batch_size=64,
                         backend=P.Data.LMDB, source='/home/tmp_data_dir/huangxuankun/batch/imagenet2015/CLS/val_lmdb',
                         transform_param=dict(scale=1./255, crop_size=299), include=dict(phase=1), ntop=2)  # phase 1 = TEST

# Each gen_* helper builds one layer by exec'ing a generated assignment string such as
# "N.conv1 = L.Convolution(N.data, ...)"; `name` and `bottom` are attribute paths on N.
def gen_conv(name, bottom, kernel_size, num_output, stride, pad, group=1):
	if kernel_size[0] == kernel_size[1]:
		exec(name+'=L.Convolution('+bottom+', kernel_size='+str(kernel_size[0])+', num_output='+str(num_output)+', stride='+str(stride)+', pad='+str(pad[0])+', group='+str(group)+', bias_term=False, weight_filler=dict(type="gaussian", std=0.01))')
	else:
		exec(name+'=L.Convolution('+bottom+', kernel_h='+str(kernel_size[0])+', kernel_w='+str(kernel_size[1])+', num_output='+str(num_output)+', stride='+str(stride)+', pad_h='+str(pad[0])+', pad_w='+str(pad[1])+', group='+str(group)+', bias_term=False, weight_filler=dict(type="gaussian", std=0.01))')

def gen_pooling(name, bottom, kernel_size, stride, pad, pool_type):
	if pool_type == 'max':
		exec(name+'=L.Pooling('+bottom+', kernel_size='+str(kernel_size[0])+', stride='+str(stride)+', pad='+str(pad[0])+', pool=P.Pooling.MAX)')
	elif pool_type == 'avg':
		exec(name+'=L.Pooling('+bottom+', kernel_size='+str(kernel_size[0])+', stride='+str(stride)+', pad='+str(pad[0])+', pool=P.Pooling.AVE)')

def gen_fc(name, bottom, num_output):
	# hypothetical completion (the snippet is cut off here), mirroring the helpers above:
	exec(name+'=L.InnerProduct('+bottom+', num_output='+str(num_output)+', weight_filler=dict(type="gaussian", std=0.01))')
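
A minimal sketch of how these generators might be driven from the mxnet symbol JSON. The 'nodes'/'op'/'name'/'attrs' field names and the string-encoded attribute formats are assumptions about Inception-7-symbol.json, not taken from the snippet:

symbol = json.load(open(model_def))
prev = 'N.data'
for node in symbol.get('nodes', []):
    if node.get('op') == 'Convolution':
        a = node['attrs']  # mxnet stores attrs as strings, e.g. kernel='(3, 3)'
        gen_conv('N.' + node['name'], prev,
                 eval(a['kernel']), int(a['num_filter']),
                 eval(a['stride'])[0], eval(a['pad']))
        prev = 'N.' + node['name']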
Code Example #2
def make_resnet(training_data='cifar10_train',
                test_data='cifar10_test',
                mean_file='mean.binaryproto',
                num_res_in_stage=3):

    num_feature_maps = np.array([16, 32, 64])  # feature map size: [32, 16, 8]

    n = caffe.NetSpec()
    # make training data layer
    n.data, n.label = L.Data(source=training_data,
                             backend=P.Data.LMDB,
                             batch_size=128,
                             ntop=2,
                             transform_param=dict(crop_size=32,
                                                  mean_file=mean_file,
                                                  mirror=True),
                             image_data_param=dict(shuffle=True),  # note: ignored by the LMDB Data layer
                             include=dict(phase=0))
    # make test data layer
    n.test_data, n.test_label = L.Data(source=test_data,
                                       backend=P.Data.LMDB,
                                       batch_size=100,
                                       ntop=2,
                                       transform_param=dict(
                                           crop_size=32,
                                           mean_file=mean_file,
                                           mirror=False),
                                       include=dict(phase=1))
    # conv1 should accept both the training and the test data layer, but that is awkward to express
    # in pycaffe: you would have to write two conv1 layers. To work around this, I temporarily ignore
    # the test data layer and let conv1 consume the training data layer's output. Then, after making
    # the whole prototxt, I postprocess the two data layers, renaming their tops to the same name
    # (sketched just below).
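    # A hypothetical sketch of that renaming step (not part of this snippet), applied to the
    # NetParameter message returned by n.to_proto():
    #
    #     proto = n.to_proto()
    #     for layer in proto.layer:
    #         if layer.type == 'Data':
    #             layer.top[0], layer.top[1] = 'data', 'label'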
    n.conv1, n.bn_conv1_train, n.bn_conv1_test, n.scale_conv1, n.relu_conv1 = \
                      conv_bn_scale_relu(n.data, kernel_size=3, num_out=16, stride=1, pad=1, params=conv_params)

    # set up a checkpoint so as to know where we get.
    checkpoint = 'n.relu_conv1'

    # start making blocks.
    # num_feature_maps: the number of feature maps for each stage. Default is [16,32,64],
    #                   suggesting the network has three stages.
    # num_res_in_stage: a parameter from the original paper, telling us how many blocks there are in
    #                   each stage.
    for num_map in num_feature_maps:
        num_map = int(num_map)
        for res in list(range(num_res_in_stage)):
            # stage name
            stage = 'map' + str(num_map) + '_' + str(res + 1) + '_'
            # use the projection block when downsampling the feature map
            if np.where(num_feature_maps == num_map)[0] >= 1 and res == 0:
                make_res = 'n.' + stage + 'conv_proj,' + \
                           'n.' + stage + 'bn_proj_train,' + \
                           'n.' + stage + 'bn_proj_test,' + \
                           'n.' + stage + 'scale_proj,' + \
                           'n.' + stage + 'conv_a,' + \
                           'n.' + stage + 'bn_a_train, ' + \
                           'n.' + stage + 'bn_a_test, ' + \
                           'n.' + stage + 'scale_a, ' + \
                           'n.' + stage + 'relu_a, ' + \
                           'n.' + stage + 'conv_b, ' + \
                           'n.' + stage + 'bn_b_train, ' + \
                           'n.' + stage + 'bn_b_test, ' + \
                           'n.' + stage + 'scale_b, ' + \
                           'n.' + stage + 'eltsum, ' + \
                           'n.' + stage + 'relu_after_sum' + \
                           ' = project_residual(' + checkpoint + ', num_out=num_map, stride=2, pad=1)'
                exec(make_res)
                checkpoint = 'n.' + stage + 'relu_after_sum'  # where we get
                continue

            # most blocks have this shape
            make_res = 'n.' + stage + 'conv_a, ' + \
                       'n.' + stage + 'bn_a_train, ' + \
                       'n.' + stage + 'bn_a_test, ' + \
                       'n.' + stage + 'scale_a, ' + \
                       'n.' + stage + 'relu_a, ' + \
                       'n.' + stage + 'conv_b, ' + \
                       'n.' + stage + 'bn_b_train, ' + \
                       'n.' + stage + 'bn_b_test, ' + \
                       'n.' + stage + 'scale_b, ' + \
                       'n.' + stage + 'eltsum, ' + \
                       'n.' + stage + 'relu_after_sum' + \
                       ' = identity_residual(' + checkpoint + ', num_out=num_map, stride=1, pad=1)'
            exec(make_res)
            checkpoint = 'n.' + stage + 'relu_after_sum'  # where we get

    # add the pooling layer
    exec('n.pool_global = L.Pooling(' + checkpoint +
         ', pool=P.Pooling.AVE, global_pooling=True)')
    n.score = L.InnerProduct(
        n.pool_global,
        num_output=10,
        param=[dict(lr_mult=1, decay_mult=1),
               dict(lr_mult=2, decay_mult=0)],
        weight_filler=dict(type='gaussian', std=0.01),
        bias_filler=dict(type='constant', value=0))
    n.loss = L.SoftmaxWithLoss(n.score, n.label)
    n.acc = L.Accuracy(n.score, n.label)

    return n.to_proto()
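
A usage sketch for make_resnet (the output path is illustrative); with num_res_in_stage=3 this is the 6n+2 = 20-layer CIFAR-10 ResNet:

if __name__ == '__main__':
    with open('resnet20_cifar10_trainval.prototxt', 'w') as f:
        f.write(str(make_resnet(num_res_in_stage=3)))
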
Code Example #3

def create_neural_net(input_file, batch_size=50):
    net = caffe.NetSpec()
    net.data, net.label = L.Data(batch_size=batch_size,
                                 source=input_file,
                                 backend=caffe.params.Data.LMDB,
                                 ntop=2,
                                 include=dict(phase=caffe.TEST),
                                 name='juniward04')

    ## pre-process
    net.conv1 = L.Convolution(net.data,
                              num_output=16,
                              kernel_size=4,
                              stride=1,
                              pad=1,
                              weight_filler=dict(type='dct4'),
                              param=[{
                                  'lr_mult': 0,
                                  'decay_mult': 0
                              }],
                              bias_term=False)
    TRUNCABS = caffe_pb2.QuantTruncAbsParameter.TRUNCABS
    net.quanttruncabs = L.QuantTruncAbs(net.conv1,
                                        process=TRUNCABS,
                                        threshold=8,
                                        in_place=True)
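    # QuantTruncAbs is a custom layer from this steganalysis codebase; assuming it
    # truncates absolute activations, its forward pass would resemble
    # out = min(|x|, threshold) with threshold=8 (an assumption, not verified here).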

    ## block 1
    [
        net.conv1_proj, net.bn2, net.scale2, net.conv512_1, net.bn2_1,
        net.scale2_1, net.relu512_1, net.conv512_to_256, net.bn2_2,
        net.scale2_2, net.res512_to_256, net.relu512_to_256
    ] = add_downsampling_block(net.quanttruncabs, 12)
    ## block 2
    [
        net.conv256_1, net.bn2_3, net.scale2_3, net.relu256_1, net.conv256_2,
        net.bn2_4, net.scale2_4, net.res256_2, net.relu256_2
    ] = add_skip_block(net.res512_to_256, 24)
    ## block 2_1
    [
        net.conv256_4, net.bn3_1, net.scale3_1, net.relu256_4, net.conv256_5,
        net.bn3_2, net.scale3_2, net.res256_5, net.relu256_5
    ] = add_skip_block(net.res256_2, 24)
    ## block 2_2
    [
        net.conv256_6, net.bn4_1, net.scale4_1, net.relu256_6, net.conv256_7,
        net.bn4_2, net.scale4_2, net.res256_7, net.relu256_7
    ] = add_skip_block(net.res256_5, 24)
    ## block 3
    [
        net.res256_2_proj, net.bn2_5, net.scale2_5, net.conv256_3, net.bn2_6,
        net.scale2_6, net.relu256_3, net.conv256_to_128, net.bn2_7,
        net.scale2_7, net.res256_to_128, net.relu256_to_128
    ] = add_downsampling_block(net.res256_7, 24)
    ##    ## block 4
    ##    [net.conv128_1, net.bn2_8, net.scale2_8, net.relu128_1, net.conv128_2, net.bn2_9,
    ##     net.scale2_9, net.res128_2, net.relu128_2] = add_skip_block(net.res256_to_128, 48)
    ## block 4_1
    [
        net.conv128_4, net.bn3_3, net.scale3_3, net.relu128_4, net.conv128_5,
        net.bn3_4, net.scale3_4, net.res128_5, net.relu128_5
    ] = add_skip_block(net.res256_to_128, 48)
    ## block 4_2
    [
        net.conv128_6, net.bn4_3, net.scale4_3, net.relu128_6, net.conv128_7,
        net.bn4_4, net.scale4_4, net.res128_7, net.relu128_7
    ] = add_skip_block(net.res128_5, 48)
    ## block 5
    [
        net.res128_2_proj, net.bn2_10, net.scale2_10, net.conv128_3,
        net.bn2_11, net.scale2_11, net.relu128_3, net.conv128_to_64,
        net.bn2_12, net.scale2_12, net.res128_to_64, net.relu128_to_64
    ] = add_downsampling_block(net.res128_7, 48)
    ## block 6
    [
        net.conv64_1, net.bn2_13, net.scale2_13, net.relu64_1, net.conv64_2,
        net.bn2_14, net.scale2_14, net.res64_2, net.relu64_2
    ] = add_skip_block(net.res128_to_64, 96)
    ## block 6_1
    [
        net.conv64_4, net.bn3_5, net.scale3_5, net.relu64_4, net.conv64_5,
        net.bn3_6, net.scale3_6, net.res64_5, net.relu64_5
    ] = add_skip_block(net.res64_2, 96)
    ## block 6_2
    [
        net.conv64_6, net.bn4_5, net.scale4_5, net.relu64_6, net.conv64_7,
        net.bn4_6, net.scale4_6, net.res64_7, net.relu64_7
    ] = add_skip_block(net.res64_5, 96)
    ## block 7
    [
        net.res64_2_proj, net.bn2_15, net.scale2_15, net.conv64_3, net.bn2_16,
        net.scale2_16, net.relu64_3, net.conv64_to_32, net.bn2_17,
        net.scale2_17, net.res64_to_32, net.relu64_to_32
    ] = add_downsampling_block(net.res64_7, 96)
    ## block 8
    [
        net.conv32_1, net.bn2_18, net.scale2_18, net.relu32_1, net.conv32_2,
        net.bn2_19, net.scale2_19, net.res32_2, net.relu32_2
    ] = add_skip_block(net.res64_to_32, 192)
    ## block 8_1
    [
        net.conv32_4, net.bn3_7, net.scale3_7, net.relu32_4, net.conv32_5,
        net.bn3_8, net.scale3_8, net.res32_5, net.relu32_5
    ] = add_skip_block(net.res32_2, 192)
    ## block 8_2
    [
        net.conv32_6, net.bn4_7, net.scale4_7, net.relu32_6, net.conv32_7,
        net.bn4_8, net.scale4_8, net.res32_7, net.relu32_7
    ] = add_skip_block(net.res32_5, 192)
    ## block 9
    [
        net.res32_2_proj, net.bn2_20, net.scale2_20, net.conv32_3, net.bn2_21,
        net.scale2_21, net.relu32_3, net.conv32_to_16, net.bn2_22,
        net.scale2_22, net.res32_to_16, net.relu32_to_16
    ] = add_downsampling_block(net.res32_7, 192)
    ## block 10
    [
        net.conv16_1, net.bn2_23, net.scale2_23, net.relu16_1, net.conv16_2,
        net.bn2_24, net.scale2_24, net.res16_2, net.relu16_2
    ] = add_skip_block(net.res32_to_16, 384)
    ## block 10_1
    [
        net.conv16_3, net.bn3_9, net.scale3_9, net.relu16_3, net.conv16_4,
        net.bn3_10, net.scale3_10, net.res16_4, net.relu16_4
    ] = add_skip_block(net.res16_2, 384)
    ## block 10_2
    [
        net.conv16_5, net.bn4_9, net.scale4_9, net.relu16_5, net.conv16_6,
        net.bn4_10, net.scale4_10, net.res16_6, net.relu16_6
    ] = add_skip_block(net.res16_4, 384)

    ## global pool
    AVE = caffe_pb2.PoolingParameter.AVE
    net.global_pool = L.Pooling(net.res16_6, pool=AVE, kernel_size=8, stride=1)

    ## fully connected
    net.fc = L.InnerProduct(net.global_pool,
                            param=[{
                                'lr_mult': 1
                            }, {
                                'lr_mult': 2
                            }],
                            num_output=2,
                            weight_filler=dict(type='xavier'),
                            bias_filler=dict(type='constant'))
    ## accuracy
    net.accuracy = L.Accuracy(net.fc,
                              net.label,
                              include=dict(phase=caffe.TEST))
    ## loss
    net.loss = L.SoftmaxWithLoss(net.fc, net.label)

    return net.to_proto()
Code Example #4
File: resnet.py (project: zys1994/caffe-model)
    def resnet_layers_proto(self,
                            batch_size,
                            phase='TRAIN',
                            stages=(3, 4, 6, 3)):
        """

        :param batch_size: the batch_size of train and test phase
        :param phase: TRAIN or TEST
        :param stages: the num of layers = 2 + 3*sum(stages), layers would better be chosen from [50, 101, 152]
                       {every stage is composed of 1 residual_branch_shortcut module and stage[i]-1 residual_branch
                       modules, each module consists of 3 conv layers}
                        (3, 4, 6, 3) for 50 layers; (3, 4, 23, 3) for 101 layers; (3, 8, 36, 3) for 152 layers
        """
        n = caffe.NetSpec()
        if phase == 'TRAIN':
            source_data = self.train_data
            mirror = True
        else:
            source_data = self.test_data
            mirror = False
        n.data, n.label = L.Data(source=source_data,
                                 backend=P.Data.LMDB,
                                 batch_size=batch_size,
                                 ntop=2,
                                 transform_param=dict(
                                     crop_size=224,
                                     mean_value=[104, 117, 123],
                                     mirror=mirror))

        n.conv1, n.conv1_bn, n.conv1_scale, n.conv1_relu = \
            conv_bn_scale_relu(n.data, num_output=64, kernel_size=7, stride=2, pad=3)  # 64x112x112
        n.pool1 = L.Pooling(n.conv1,
                            kernel_size=3,
                            stride=2,
                            pool=P.Pooling.MAX)  # 64x56x56

        for num in xrange(len(stages)):  # num = 0, 1, 2, 3
            for i in xrange(stages[num]):
                if i == 0:
                    stage_string = branch_shortcut_string
                    bottom_string = [
                        'n.pool1',
                        'n.res2b%s' % str(stages[0] - 1),
                        'n.res3b%s' % str(stages[1] - 1),
                        'n.res4b%s' % str(stages[2] - 1)
                    ][num]
                else:
                    stage_string = branch_string
                    if i == 1:
                        bottom_string = 'n.res%sa' % str(num + 2)
                    else:
                        bottom_string = 'n.res%sb%s' % (str(num + 2),
                                                        str(i - 1))
                exec(
                    stage_string.replace('(stage)', str(num + 2)).replace(
                        '(bottom)', bottom_string).replace(
                            '(num)', str(2**num * 64)).replace(
                                '(order)',
                                str(i)).replace('(stride)',
                                                str(int(num > 0) + 1)))
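                # for num=0, i=0 the exec above runs branch_shortcut_string (defined
                # elsewhere in resnet.py) with (stage)=2, (bottom)=n.pool1, (num)=64,
                # (order)=0 and (stride)=1 substituted in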

        exec('n.pool5 = L.Pooling((bottom), pool=P.Pooling.AVE, global_pooling=True)'.
             replace('(bottom)', 'n.res5b%s' % str(stages[3] - 1)))
        n.classifier = L.InnerProduct(n.pool5,
                                      num_output=self.classifier_num,
                                      param=[
                                          dict(lr_mult=1, decay_mult=1),
                                          dict(lr_mult=2, decay_mult=0)
                                      ],
                                      weight_filler=dict(type='xavier'),
                                      bias_filler=dict(type='constant',
                                                       value=0))
        n.loss = L.SoftmaxWithLoss(n.classifier, n.label)
        if phase != 'TRAIN':
            n.accuracy_top1 = L.Accuracy(n.classifier,
                                         n.label,
                                         include=dict(phase=1))
            n.accuracy_top5 = L.Accuracy(n.classifier,
                                         n.label,
                                         include=dict(phase=1),
                                         accuracy_param=dict(top_k=5))

        return n.to_proto()
Code Example #5
File: alexnet.py (project: zbxzc35/caffe-model)
    def alexnet_proto(self, batch_size, phase='TRAIN'):
        n = caffe.NetSpec()
        if phase == 'TRAIN':
            source_data = self.train_data
            mirror = True
        else:
            source_data = self.test_data
            mirror = False
        n.data, n.label = L.Data(source=source_data,
                                 backend=P.Data.LMDB,
                                 batch_size=batch_size,
                                 ntop=2,
                                 transform_param=dict(
                                     crop_size=227,
                                     mean_value=[104, 117, 123],
                                     mirror=mirror))

        n.conv1, n.relu1 = conv_relu(n.data,
                                     num_output=96,
                                     kernel_size=11,
                                     stride=4)  # 96x55x55
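        # output side: (227 - 11) / 4 + 1 = 55, hence the 96x55x55 shape noted above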
        n.norm1 = L.LRN(n.conv1, local_size=5, alpha=0.0001, beta=0.75)
        n.pool1 = L.Pooling(n.norm1,
                            kernel_size=3,
                            stride=2,
                            pool=P.Pooling.MAX)  # 96x27x27

        n.conv2, n.relu2 = conv_relu(n.pool1,
                                     num_output=256,
                                     kernel_size=5,
                                     pad=2,
                                     group=2)  # 256x27x27
        n.norm2 = L.LRN(n.conv2, local_size=5, alpha=0.0001, beta=0.75)
        n.pool2 = L.Pooling(n.norm2,
                            kernel_size=3,
                            stride=2,
                            pool=P.Pooling.MAX)  # 256x13x13

        n.conv3, n.relu3 = conv_relu(n.pool2,
                                     num_output=384,
                                     kernel_size=3,
                                     pad=1)  # 384x13x13
        n.conv4, n.relu4 = conv_relu(n.conv3,
                                     num_output=384,
                                     kernel_size=3,
                                     pad=1,
                                     group=2)  # 384x13x13

        n.conv5, n.relu5 = conv_relu(n.conv4,
                                     num_output=256,
                                     kernel_size=3,
                                     pad=1,
                                     group=2)  # 256x13x13
        n.pool5 = L.Pooling(n.conv5,
                            kernel_size=3,
                            stride=2,
                            pool=P.Pooling.MAX)  # 256x6x6

        n.fc6, n.relu6, n.drop6 = fc_relu_drop(n.pool5,
                                               num_output=4096)  # 4096x1x1
        n.fc7, n.relu7, n.drop7 = fc_relu_drop(n.fc6,
                                               num_output=4096)  # 4096x1x1
        n.fc8 = L.InnerProduct(n.fc7,
                               num_output=self.classifier_num,
                               param=[
                                   dict(lr_mult=1, decay_mult=1),
                                   dict(lr_mult=2, decay_mult=0)
                               ],
                               weight_filler=dict(type='gaussian', std=0.01),
                               bias_filler=dict(type='constant', value=0))
        n.loss = L.SoftmaxWithLoss(n.fc8, n.label)
        if phase != 'TRAIN':
            n.accuracy_top1, n.accuracy_top5 = accuracy_top1_top5(
                n.fc8, n.label)

        return n.to_proto()
Code Example #6
File: jpeg_input.py (project: EunjuYang/IDLE)
    def compile_time_operation(self, learning_option, cluster):
        option = learning_option.get("option", self.option)
        file_format = learning_option.get("file_format", self.file_format)
        data_path = learning_option.get("data_path", self.data_path)
        label_path = learning_option.get("label_path", self.label_path)
        batch_size = learning_option.get("batch_size", self.batch_size)
        iteration = learning_option.get("iteration", self.iteration)
        image_size = self.image_size
        output_shape = self.output_shape
        
        # for shmcaffe
        #learning_option["move_rate"] = learning_option.get("move_rate", 0.2)
        #learning_option["tau"] = learning_option.get("tau", 1)

        # Phase checkpoint setting, PHASE: 0 for train, 1 for test
        isTrainTest = 0
        if option.lower() == "test":
            temp_include = dict(phase=caffe.TEST)
            data_path = learning_option.get("test_data_path", data_path)
            test_label_path = learning_option.get("test_label_path", label_path)
            batch_size = learning_option.get("test_batch_size", batch_size)
        elif option.lower() == "datasets":
            temp_include = dict(phase=caffe.TRAIN)
        elif option.lower() == "train_test":
            temp_include = dict(phase=caffe.TRAIN)
            isTrainTest = 1
        else:
            temp_include = dict(phase=caffe.TRAIN)

        # DB Data
        if file_format.lower() in ["lmdb", "leveldb"]:
            # Backend checkpoint setting, default value 0 (leveldb) for backend
            # Data layer setting
            image, label = L.Data(name=self.name, source=data_path,
                             batch_size=batch_size, backend=(0 if file_format.lower()=="leveldb" else 1), include=temp_include, ntop=2)
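            # (in caffe.proto, DataParameter.DB enumerates LEVELDB = 0, LMDB = 1)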

            if isTrainTest == 1:
                data_path = learning_option.get("test_data_path", data_path)
                batch_size = learning_option.get("test_batch_size", batch_size)
                temp_image, temp_label = L.Data(name=self.name, source=data_path,
                                                batch_size=batch_size,
                                                backend=(0 if file_format.lower() == "leveldb" else 1),
                                                include=dict(phase=caffe.TEST), ntop=2)
                setattr(tempNet, str(self.name) + '.image', temp_image)
                setattr(tempNet, str(self.name) + '.label', temp_label)

        # Image Data
        # TODO: formats such as HDF5 could be handled by another separate kind of layer,
        # but for now we assume only raw image files come in
        else:
            # Read and parse the source directory
            
            ''' for uninfo -twkim
            with open(data_path+'/'+label_path, 'r') as f:
                lines = f.readlines()
            new_lines = []
            for line in lines:
                new_lines.append('/'+line.split()[0]+'.'+file_format + ' ' + line.split()[1]+'\n')
            with open(data_path+'/'+label_path.split('.')[0]+'_caffelist.txt', 'w') as f:
                f.writelines(new_lines)
                f.close()
            '''
            
            # Image Data layer setting
            image, label = L.ImageData(name=self.name,
                                       source=data_path + '/' + label_path.split('.')[0] + '_caffelist.txt',
                                       batch_size=batch_size, include=temp_include, ntop=2, root_folder=data_path,
                                       new_height=image_size[1], new_width=image_size[0])

            if isTrainTest == 1:
                data_path = learning_option.get("test_data_path", data_path)
                batch_size = learning_option.get("test_batch_size", batch_size)
                label_path = learning_option.get("test_label_path", label_path)

                # Read and parse the source directory
                ''' for uninfo - twkim
                with open(data_path + '/' + label_path, 'r') as f:
                    lines = f.readlines()
                new_lines = []
                for line in lines:
                    new_lines.append('/' + line.split()[0] + '.' + file_format + ' ' + line.split()[1] + '\n')
                with open(data_path + '/' + label_path.split('.')[0] + '_caffelist.txt', 'w') as f:
                    f.writelines(new_lines)
                    f.close()
                '''
                # Test image data layer setting
                temp_image, temp_label = L.ImageData(name=self.name,
                                                     source=data_path + '/' + label_path.split('.')[0] + '_caffelist.txt',
                                                     batch_size=batch_size, include=dict(phase=caffe.TEST), ntop=2,
                                                     root_folder=data_path, new_height=image_size[1],
                                                     new_width=image_size[0])
                setattr(tempNet, str(self.name) + '.image', temp_image)
                setattr(tempNet, str(self.name) + '.label', temp_label)

        # Record the layer output information
        self.set_output('image', image)
        self.set_output('label', label)
        self.set_dimension('image', image_size)
        try:
            if isTrainTest != 1:
                del learning_option['option']
            del learning_option['file_format']
            del learning_option['data_path']
            del learning_option['label_path']
            del learning_option['batch_size']
            del learning_option['iteration']
            learning_option['max_iter'] = iteration
        except KeyError:
            pass

        try:
            del learning_option['test_data_path']
            del learning_option['test_label_path']
            del learning_option['test_batch_size']
            learning_option['test_iter'] = learning_option.get("test_iteration", 100)
            del learning_option['test_iteration']
        except KeyError:
            pass
Code Example #7
    def resnet_layers_proto(self,
                            batch_size,
                            phase='TRAIN',
                            stages=(3, 4, 6, 3)):
        """
            (3, 4, 6, 3) for 50 layers; (3, 4, 23, 3) for 101 layers; (3, 8, 36, 3) for 152 layers
        """
        global use_global_stats

        n = caffe.NetSpec()
        if phase == 'TRAIN':
            n.data, n.label = L.Data(source=self.train_data,
                                     backend=P.Data.LMDB,
                                     batch_size=batch_size,
                                     ntop=2,
                                     include=dict(phase=0),
                                     transform_param=dict(
                                         crop_size=224,
                                         mean_value=[104, 117, 123],
                                         mirror=True))

            use_global_stats = False

        else:
            n.data, n.label = L.Data(source=self.test_data,
                                     backend=P.Data.LMDB,
                                     batch_size=batch_size,
                                     ntop=2,
                                     include=dict(phase=1),
                                     transform_param=dict(
                                         crop_size=224,
                                         mean_value=[104, 117, 123],
                                         mirror=False))

            use_global_stats = True
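            # use_global_stats is presumably consumed by the conv_bn_scale_relu helpers so
            # that BatchNorm uses its stored global statistics in the TEST phase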

        n.conv1, n.conv1_bn, n.conv1_scale, n.conv1_relu = conv_bn_scale_relu(
            n.data,
            num_output=64,
            kernel_size=7,
            stride=2,
            pad=3,
            bias_term=True)
        n.pool1 = L.Pooling(n.conv1,
                            kernel_size=3,
                            stride=2,
                            pool=P.Pooling.MAX)  # 64x56x56

        for num in xrange(len(stages)):  # num = 0, 1, 2, 3
            for i in xrange(stages[num]):
                if i == 0:
                    stage_string = branch_shortcut_string
                    bottom_string = [
                        'n.pool1',
                        'n.res2b%s' % str(stages[0] - 1),
                        'n.res3b%s' % str(stages[1] - 1),
                        'n.res4b%s' % str(stages[2] - 1)
                    ][num]
                else:
                    stage_string = branch_string
                    if i == 1:
                        bottom_string = 'n.res%sa' % str(num + 2)
                    else:
                        bottom_string = 'n.res%sb%s' % (str(num + 2),
                                                        str(i - 1))
                exec(
                    stage_string.replace('(stage)', str(num + 2)).replace(
                        '(bottom)', bottom_string).replace(
                            '(num)', str(2**num * 64)).replace(
                                '(order)',
                                str(i)).replace('(stride)',
                                                str(int(num > 0) + 1)))

        exec('n.pool5 = L.Pooling((bottom), pool=P.Pooling.AVE, global_pooling=True)'.replace(
            '(bottom)', 'n.res5b%s' % str(stages[3] - 1)))
        n.classifier = L.InnerProduct(n.pool5, num_output=self.classifier_num)
        n.loss = L.SoftmaxWithLoss(n.classifier, n.label)
        if phase == 'TEST':
            n.accuracy_top1 = L.Accuracy(n.classifier,
                                         n.label,
                                         include=dict(phase=1))
            n.accuracy_top5 = L.Accuracy(n.classifier,
                                         n.label,
                                         include=dict(phase=1),
                                         accuracy_param=dict(top_k=5))

        return n.to_proto()
Code Example #8
File: improve_ip1_new.py (project: zhuty94/CaffeSVD)
    print("SVD NET:")
    for layer_name, param in netSVD.params.items():
        # 0 is the weights, 1 is the biases
        print(layer_name, param[0].data.shape)

print(type(net.params))
print(net.params.keys())
print("layer ip1:")
print("WEIGHT:")
print(net.params["ip1"][0].data.shape)
print("BIASES:")
print(net.params["ip1"][1].data.shape)

data, label = L.Data(source=test_db,
                     backend=P.Data.LMDB,
                     batch_size=100,
                     ntop=2,
                     transform_param=dict(mean_file=mean_proto))

model_file = "model/net_SVD%d.caffemodel" % SVD_R

if SVD_R > 0:
    # SVD
    print("SVD %d" % SVD_R)
    u, sigma, vt = la.svd(net.params["ip1"][0].data)
    print("Sigma: ", sigma)
    if SVD_R > len(sigma):
        print("SVD_R is too large :-(")
        sys.exit()
    U = np.matrix(u[:, :SVD_R])
    S = np.matrix(np.diag(sigma[:SVD_R]))
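
A hypothetical continuation (the snippet stops after the rank-R factors; the reconstruction step below is an assumption based on standard SVD weight compression, not taken from the source):

V = np.matrix(vt[:SVD_R, :])
net.params["ip1"][0].data[...] = U * S * V  # rank-R approximation of ip1's weights
net.save(model_file)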
Code Example #9
def lenet(lmdb, batch_size):
    # our version of LeNet, extended below with SqueezeNet-style fire modules
    n = caffe.NetSpec()

    n.data, n.label = L.Data(batch_size=batch_size,
                             backend=P.Data.LMDB,
                             source=lmdb,
                             transform_param=dict(crop_size=227,
                                                  mean_value=[104, 117, 123]),
                             ntop=2)

    n.conv1 = L.Convolution(n.data,
                            kernel_size=3,
                            num_output=64,
                            stride=2,
                            weight_filler=dict(type='xavier'))
    n.relu1 = L.ReLU(n.conv1, in_place=True)
    n.pool1 = L.Pooling(n.relu1, kernel_size=3, stride=2, pool=P.Pooling.MAX)

    #n.fire2_squeeze1x1 		= L.Convolution(n.pool1, kernel_size=1, num_output=16,weight_filler=dict(type='xavier'))
    #n.fire2_relu_squeeze1x1 	= L.ReLU(n.fire2_squeeze1x1,in_place=True)
    #n.fire2_expand1x1 		= L.Convolution(n.fire2_relu_squeeze1x1, kernel_size=1, num_output=64,weight_filler=dict(type='xavier'))
    #n.fire2_relu_expand1x1 	= L.ReLU(n.fire2_expand1x1,in_place=True)
    #n.fire2_expand3x3 		= L.Convolution(n.fire2_relu_squeeze1x1, kernel_size=3, pad=1, num_output=64,weight_filler=dict(type='xavier'))
    #n.fire2_relu_expand3x3 	= L.ReLU(n.fire2_expand3x3,in_place=True)
    #n.fire2_concat		= L.Concat(n.fire2_relu_expand1x1,n.fire2_relu_expand3x3)

    suffix = ""
    fire_num = 2
    exec 'n.fire{0}_squeeze1x1{1} 	= L.Convolution(n.pool1, kernel_size=1, num_output=16,weight_filler=dict(type="xavier"))'.format(
        fire_num, suffix)
    exec 'n.fire{0}_relu_squeeze1x1{1} 	= L.ReLU(n.fire{0}_squeeze1x1{1},in_place=True)'.format(
        fire_num, suffix)
    exec 'n.fire{0}_expand1x1{1}	= L.Convolution(n.fire{0}_relu_squeeze1x1{1}, kernel_size=1, num_output=64,weight_filler=dict(type="xavier"))'.format(
        fire_num, suffix)
    exec 'n.fire{0}_relu_expand1x1{1} 	= L.ReLU(n.fire{0}_expand1x1{1},in_place=True)'.format(
        fire_num, suffix)
    exec 'n.fire{0}_expand3x3{1} 	= L.Convolution(n.fire{0}_relu_squeeze1x1{1}, kernel_size=3, pad=1, num_output=64,weight_filler=dict(type="xavier"))'.format(
        fire_num, suffix)
    exec 'n.fire{0}_relu_expand3x3{1} 	= L.ReLU(n.fire{0}_expand3x3{1},in_place=True)'.format(
        fire_num, suffix)
    exec 'n.fire{0}_concat{1}		= L.Concat(n.fire{0}_relu_expand1x1{1},n.fire{0}_relu_expand3x3{1})'.format(
        fire_num, suffix)

    fire_num = 3
    exec 'n.fire{0}_squeeze1x1{1} 	= L.Convolution(n.fire2_concat, kernel_size=1, num_output=16,weight_filler=dict(type="xavier"))'.format(
        fire_num, suffix)
    exec 'n.fire{0}_relu_squeeze1x1{1} 	= L.ReLU(n.fire{0}_squeeze1x1{1},in_place=True)'.format(
        fire_num, suffix)
    exec 'n.fire{0}_expand1x1{1}	= L.Convolution(n.fire{0}_relu_squeeze1x1{1}, kernel_size=1, num_output=64,weight_filler=dict(type="xavier"))'.format(
        fire_num, suffix)
    exec 'n.fire{0}_relu_expand1x1{1} 	= L.ReLU(n.fire{0}_expand1x1{1},in_place=True)'.format(
        fire_num, suffix)
    exec 'n.fire{0}_expand3x3{1} 	= L.Convolution(n.fire{0}_relu_squeeze1x1{1}, kernel_size=3, pad=1, num_output=64,weight_filler=dict(type="xavier"))'.format(
        fire_num, suffix)
    exec 'n.fire{0}_relu_expand3x3{1} 	= L.ReLU(n.fire{0}_expand3x3{1},in_place=True)'.format(
        fire_num, suffix)
    exec 'n.fire{0}_concat{1}		= L.Concat(n.fire{0}_relu_expand1x1{1},n.fire{0}_relu_expand3x3{1})'.format(
        fire_num, suffix)
    n.pool3 = L.Pooling(n.fire3_concat,
                        kernel_size=3,
                        stride=2,
                        pool=P.Pooling.MAX)

    #n.fire3_squeeze1x1 		= L.Convolution(n.fire2_concat, kernel_size=1, num_output=16,weight_filler=dict(type='xavier'))
    #n.fire3_relu_squeeze1x1 	= L.ReLU(n.fire3_squeeze1x1,in_place=True)
    #n.fire3_expand1x1 		= L.Convolution(n.fire3_relu_squeeze1x1, kernel_size=1, num_output=64,weight_filler=dict(type='xavier'))
    #n.fire3_relu_expand1x1 	= L.ReLU(n.fire3_expand1x1,in_place=True)
    #n.fire3_expand3x3 		= L.Convolution(n.fire3_relu_squeeze1x1, kernel_size=3, pad=1, num_output=64,weight_filler=dict(type='xavier'))
    #n.fire3_relu_expand3x3 	= L.ReLU(n.fire3_expand3x3,in_place=True)
    #n.fire3_concat		= L.Concat(n.fire3_relu_expand1x1,n.fire3_relu_expand3x3)
    #n.pool3			= L.Pooling(n.fire3_concat, kernel_size=3, stride=2, pool=P.Pooling.MAX)

    suffix = '_cluster'  # root node branch
    fire_num = 4
    exec 'n.fire{0}_squeeze1x1{1} 	= L.Convolution(n.pool3, kernel_size=1, num_output=8,weight_filler=dict(type="xavier"))'.format(
        fire_num, suffix)
    exec 'n.fire{0}_relu_squeeze1x1{1} 	= L.ReLU(n.fire{0}_squeeze1x1{1},in_place=True)'.format(
        fire_num, suffix)
    exec 'n.fire{0}_expand1x1{1}	= L.Convolution(n.fire{0}_relu_squeeze1x1{1}, kernel_size=1, num_output=32,weight_filler=dict(type="xavier"))'.format(
        fire_num, suffix)
    exec 'n.fire{0}_relu_expand1x1{1} 	= L.ReLU(n.fire{0}_expand1x1{1},in_place=True)'.format(
        fire_num, suffix)
    exec 'n.fire{0}_expand3x3{1} 	= L.Convolution(n.fire{0}_relu_squeeze1x1{1}, kernel_size=3, pad=1, num_output=32,weight_filler=dict(type="xavier"))'.format(
        fire_num, suffix)
    exec 'n.fire{0}_relu_expand3x3{1} 	= L.ReLU(n.fire{0}_expand3x3{1},in_place=True)'.format(
        fire_num, suffix)
    exec 'n.fire{0}_concat{1}		= L.Concat(n.fire{0}_relu_expand1x1{1},n.fire{0}_relu_expand3x3{1})'.format(
        fire_num, suffix)
    #    n.pool4_cluster			= L.Pooling(n.fire4_concat, kernel_size=3, stride=2, pool=P.Pooling.MAX)
    fire_num = 5
    exec 'n.fire{0}_squeeze1x1{1} 	= L.Convolution(n.fire4_concat_cluster, kernel_size=1, num_output=8,weight_filler=dict(type="xavier"))'.format(
        fire_num, suffix)
    exec 'n.fire{0}_relu_squeeze1x1{1} 	= L.ReLU(n.fire{0}_squeeze1x1{1},in_place=True)'.format(
        fire_num, suffix)
    exec 'n.fire{0}_expand1x1{1}	= L.Convolution(n.fire{0}_relu_squeeze1x1{1}, kernel_size=1, num_output=32,weight_filler=dict(type="xavier"))'.format(
        fire_num, suffix)
    exec 'n.fire{0}_relu_expand1x1{1} 	= L.ReLU(n.fire{0}_expand1x1{1},in_place=True)'.format(
        fire_num, suffix)
    exec 'n.fire{0}_expand3x3{1} 	= L.Convolution(n.fire{0}_relu_squeeze1x1{1}, kernel_size=3, pad=1, num_output=32,weight_filler=dict(type="xavier"))'.format(
        fire_num, suffix)
    exec 'n.fire{0}_relu_expand3x3{1} 	= L.ReLU(n.fire{0}_expand3x3{1},in_place=True)'.format(
        fire_num, suffix)
    exec 'n.fire{0}_concat{1}		= L.Concat(n.fire{0}_relu_expand1x1{1},n.fire{0}_relu_expand3x3{1})'.format(
        fire_num, suffix)
    exec 'n.pool{0}{1}			= L.Pooling(n.fire{0}_concat{1}, kernel_size=3, stride=2, pool=P.Pooling.MAX)'.format(
        fire_num, suffix)

    fire_num = 6
    exec 'n.fire{0}_squeeze1x1{1} 	= L.Convolution(n.pool5_cluster, kernel_size=1, num_output=12,weight_filler=dict(type="xavier"))'.format(
        fire_num, suffix)
    exec 'n.fire{0}_relu_squeeze1x1{1} 	= L.ReLU(n.fire{0}_squeeze1x1{1},in_place=True)'.format(
        fire_num, suffix)
    exec 'n.fire{0}_expand1x1{1}	= L.Convolution(n.fire{0}_relu_squeeze1x1{1}, kernel_size=1, num_output=48,weight_filler=dict(type="xavier"))'.format(
        fire_num, suffix)
    exec 'n.fire{0}_relu_expand1x1{1} 	= L.ReLU(n.fire{0}_expand1x1{1},in_place=True)'.format(
        fire_num, suffix)
    exec 'n.fire{0}_expand3x3{1} 	= L.Convolution(n.fire{0}_relu_squeeze1x1{1}, kernel_size=3, pad=1, num_output=48,weight_filler=dict(type="xavier"))'.format(
        fire_num, suffix)
    exec 'n.fire{0}_relu_expand3x3{1} 	= L.ReLU(n.fire{0}_expand3x3{1},in_place=True)'.format(
        fire_num, suffix)
    exec 'n.fire{0}_concat{1}		= L.Concat(n.fire{0}_relu_expand1x1{1},n.fire{0}_relu_expand3x3{1})'.format(
        fire_num, suffix)

    fire_num = 7
    exec 'n.fire{0}_squeeze1x1{1} 	= L.Convolution(n.fire6_concat_cluster, kernel_size=1, num_output=12,weight_filler=dict(type="xavier"))'.format(
        fire_num, suffix)
    exec 'n.fire{0}_relu_squeeze1x1{1} 	= L.ReLU(n.fire{0}_squeeze1x1{1},in_place=True)'.format(
        fire_num, suffix)
    exec 'n.fire{0}_expand1x1{1}	= L.Convolution(n.fire{0}_relu_squeeze1x1{1}, kernel_size=1, num_output=48,weight_filler=dict(type="xavier"))'.format(
        fire_num, suffix)
    exec 'n.fire{0}_relu_expand1x1{1} 	= L.ReLU(n.fire{0}_expand1x1{1},in_place=True)'.format(
        fire_num, suffix)
    exec 'n.fire{0}_expand3x3{1} 	= L.Convolution(n.fire{0}_relu_squeeze1x1{1}, kernel_size=3, pad=1, num_output=48,weight_filler=dict(type="xavier"))'.format(
        fire_num, suffix)
    exec 'n.fire{0}_relu_expand3x3{1} 	= L.ReLU(n.fire{0}_expand3x3{1},in_place=True)'.format(
        fire_num, suffix)
    exec 'n.fire{0}_concat{1}		= L.Concat(n.fire{0}_relu_expand1x1{1},n.fire{0}_relu_expand3x3{1})'.format(
        fire_num, suffix)

    fire_num = 8
    exec 'n.fire{0}_squeeze1x1{1} 	= L.Convolution(n.fire7_concat_cluster, kernel_size=1, num_output=16,weight_filler=dict(type="xavier"))'.format(
        fire_num, suffix)
    exec 'n.fire{0}_relu_squeeze1x1{1} 	= L.ReLU(n.fire{0}_squeeze1x1{1},in_place=True)'.format(
        fire_num, suffix)
    exec 'n.fire{0}_expand1x1{1}	= L.Convolution(n.fire{0}_relu_squeeze1x1{1}, kernel_size=1, num_output=64,weight_filler=dict(type="xavier"))'.format(
        fire_num, suffix)
    exec 'n.fire{0}_relu_expand1x1{1} 	= L.ReLU(n.fire{0}_expand1x1{1},in_place=True)'.format(
        fire_num, suffix)
    exec 'n.fire{0}_expand3x3{1} 	= L.Convolution(n.fire{0}_relu_squeeze1x1{1}, kernel_size=3, pad=1, num_output=64,weight_filler=dict(type="xavier"))'.format(
        fire_num, suffix)
    exec 'n.fire{0}_relu_expand3x3{1} 	= L.ReLU(n.fire{0}_expand3x3{1},in_place=True)'.format(
        fire_num, suffix)
    exec 'n.fire{0}_concat{1}		= L.Concat(n.fire{0}_relu_expand1x1{1},n.fire{0}_relu_expand3x3{1})'.format(
        fire_num, suffix)

    fire_num = 9
    exec 'n.fire{0}_squeeze1x1{1} 	= L.Convolution(n.fire8_concat_cluster, kernel_size=1, num_output=16,weight_filler=dict(type="xavier"))'.format(
        fire_num, suffix)
    exec 'n.fire{0}_relu_squeeze1x1{1} 	= L.ReLU(n.fire{0}_squeeze1x1{1},in_place=True)'.format(
        fire_num, suffix)
    exec 'n.fire{0}_expand1x1{1}	= L.Convolution(n.fire{0}_relu_squeeze1x1{1}, kernel_size=1, num_output=64,weight_filler=dict(type="xavier"))'.format(
        fire_num, suffix)
    exec 'n.fire{0}_relu_expand1x1{1} 	= L.ReLU(n.fire{0}_expand1x1{1},in_place=True)'.format(
        fire_num, suffix)
    exec 'n.fire{0}_expand3x3{1} 	= L.Convolution(n.fire{0}_relu_squeeze1x1{1}, kernel_size=3, pad=1, num_output=64,weight_filler=dict(type="xavier"))'.format(
        fire_num, suffix)
    exec 'n.fire{0}_relu_expand3x3{1} 	= L.ReLU(n.fire{0}_expand3x3{1},in_place=True)'.format(
        fire_num, suffix)
    exec 'n.fire{0}_concat{1}		= L.Concat(n.fire{0}_relu_expand1x1{1},n.fire{0}_relu_expand3x3{1})'.format(
        fire_num, suffix)
    exec 'n.drop{0}{1}			= L.Dropout(n.fire{0}_concat{1}, in_place=True, dropout_ratio=0.5)'.format(
        fire_num, suffix)

    fire_num = 10
    exec 'n.conv{0}{1} 			= L.Convolution(n.fire9_concat{1}, kernel_size=1, num_output=4,weight_filler=dict(type="gaussian",mean=0.0,std=0.01))'.format(
        fire_num, suffix)
    #exec 'n.conv{0}_relu{1} 		= L.ReLU(n.conv{0}{1},in_place=True)'.format(fire_num,suffix)
    exec 'n.pool{0}{1}			= L.Pooling(n.conv{0}{1}, global_pooling=True, pool=P.Pooling.AVE)'.format(
        fire_num, suffix)
    #exec 'n.softmax{1}			= L.Softmax(n.pool{0}{1})'.format(fire_num,suffix)

    for i in range(1, num_branch + 1):

        suffix = '_b{0}'.format(i)  # root node branch
        fire_num = 4
        exec 'n.fire{0}_squeeze1x1{1} 	= L.Convolution(n.pool3, kernel_size=1, num_output=8,weight_filler=dict(type="xavier"))'.format(
            fire_num, suffix)
        exec 'n.fire{0}_relu_squeeze1x1{1} 	= L.ReLU(n.fire{0}_squeeze1x1{1},in_place=True)'.format(
            fire_num, suffix)
        exec 'n.fire{0}_expand1x1{1}	= L.Convolution(n.fire{0}_relu_squeeze1x1{1}, kernel_size=1, num_output=32,weight_filler=dict(type="xavier"))'.format(
            fire_num, suffix)
        exec 'n.fire{0}_relu_expand1x1{1} 	= L.ReLU(n.fire{0}_expand1x1{1},in_place=True)'.format(
            fire_num, suffix)
        exec 'n.fire{0}_expand3x3{1} 	= L.Convolution(n.fire{0}_relu_squeeze1x1{1}, kernel_size=3, pad=1, num_output=32,weight_filler=dict(type="xavier"))'.format(
            fire_num, suffix)
        exec 'n.fire{0}_relu_expand3x3{1} 	= L.ReLU(n.fire{0}_expand3x3{1},in_place=True)'.format(
            fire_num, suffix)
        exec 'n.fire{0}_concat{1}		= L.Concat(n.fire{0}_relu_expand1x1{1},n.fire{0}_relu_expand3x3{1})'.format(
            fire_num, suffix)
        #        n.pool4_cluster			= L.Pooling(n.fire4_concat, kernel_size=3, stride=2, pool=P.Pooling.MAX)
        fire_num = 5
        exec 'n.fire{0}_squeeze1x1{1} 	= L.Convolution(n.fire4_concat_cluster, kernel_size=1, num_output=8,weight_filler=dict(type="xavier"))'.format(
            fire_num, suffix)
        exec 'n.fire{0}_relu_squeeze1x1{1} 	= L.ReLU(n.fire{0}_squeeze1x1{1},in_place=True)'.format(
            fire_num, suffix)
        exec 'n.fire{0}_expand1x1{1}	= L.Convolution(n.fire{0}_relu_squeeze1x1{1}, kernel_size=1, num_output=32,weight_filler=dict(type="xavier"))'.format(
            fire_num, suffix)
        exec 'n.fire{0}_relu_expand1x1{1} 	= L.ReLU(n.fire{0}_expand1x1{1},in_place=True)'.format(
            fire_num, suffix)
        exec 'n.fire{0}_expand3x3{1} 	= L.Convolution(n.fire{0}_relu_squeeze1x1{1}, kernel_size=3, pad=1, num_output=32,weight_filler=dict(type="xavier"))'.format(
            fire_num, suffix)
        exec 'n.fire{0}_relu_expand3x3{1} 	= L.ReLU(n.fire{0}_expand3x3{1},in_place=True)'.format(
            fire_num, suffix)
        exec 'n.fire{0}_concat{1}		= L.Concat(n.fire{0}_relu_expand1x1{1},n.fire{0}_relu_expand3x3{1})'.format(
            fire_num, suffix)
        exec 'n.pool{0}{1}			= L.Pooling(n.fire{0}_concat{1}, kernel_size=3, stride=2, pool=P.Pooling.MAX)'.format(
            fire_num, suffix)

        fire_num = 6
        exec 'n.fire{0}_squeeze1x1{1} 	= L.Convolution(n.pool5_cluster, kernel_size=1, num_output=12,weight_filler=dict(type="xavier"))'.format(
            fire_num, suffix)
        exec 'n.fire{0}_relu_squeeze1x1{1} 	= L.ReLU(n.fire{0}_squeeze1x1{1},in_place=True)'.format(
            fire_num, suffix)
        exec 'n.fire{0}_expand1x1{1}	= L.Convolution(n.fire{0}_relu_squeeze1x1{1}, kernel_size=1, num_output=48,weight_filler=dict(type="xavier"))'.format(
            fire_num, suffix)
        exec 'n.fire{0}_relu_expand1x1{1} 	= L.ReLU(n.fire{0}_expand1x1{1},in_place=True)'.format(
            fire_num, suffix)
        exec 'n.fire{0}_expand3x3{1} 	= L.Convolution(n.fire{0}_relu_squeeze1x1{1}, kernel_size=3, pad=1, num_output=48,weight_filler=dict(type="xavier"))'.format(
            fire_num, suffix)
        exec 'n.fire{0}_relu_expand3x3{1} 	= L.ReLU(n.fire{0}_expand3x3{1},in_place=True)'.format(
            fire_num, suffix)
        exec 'n.fire{0}_concat{1}		= L.Concat(n.fire{0}_relu_expand1x1{1},n.fire{0}_relu_expand3x3{1})'.format(
            fire_num, suffix)

        fire_num = 7
        exec 'n.fire{0}_squeeze1x1{1} 	= L.Convolution(n.fire6_concat_cluster, kernel_size=1, num_output=12,weight_filler=dict(type="xavier"))'.format(
            fire_num, suffix)
        exec 'n.fire{0}_relu_squeeze1x1{1} 	= L.ReLU(n.fire{0}_squeeze1x1{1},in_place=True)'.format(
            fire_num, suffix)
        exec 'n.fire{0}_expand1x1{1}	= L.Convolution(n.fire{0}_relu_squeeze1x1{1}, kernel_size=1, num_output=48,weight_filler=dict(type="xavier"))'.format(
            fire_num, suffix)
        exec 'n.fire{0}_relu_expand1x1{1} 	= L.ReLU(n.fire{0}_expand1x1{1},in_place=True)'.format(
            fire_num, suffix)
        exec 'n.fire{0}_expand3x3{1} 	= L.Convolution(n.fire{0}_relu_squeeze1x1{1}, kernel_size=3, pad=1, num_output=48,weight_filler=dict(type="xavier"))'.format(
            fire_num, suffix)
        exec 'n.fire{0}_relu_expand3x3{1} 	= L.ReLU(n.fire{0}_expand3x3{1},in_place=True)'.format(
            fire_num, suffix)
        exec 'n.fire{0}_concat{1}		= L.Concat(n.fire{0}_relu_expand1x1{1},n.fire{0}_relu_expand3x3{1})'.format(
            fire_num, suffix)

        fire_num = 8
        exec 'n.fire{0}_squeeze1x1{1} 	= L.Convolution(n.fire7_concat_cluster, kernel_size=1, num_output=16,weight_filler=dict(type="xavier"))'.format(
            fire_num, suffix)
        exec 'n.fire{0}_relu_squeeze1x1{1} 	= L.ReLU(n.fire{0}_squeeze1x1{1},in_place=True)'.format(
            fire_num, suffix)
        exec 'n.fire{0}_expand1x1{1}	= L.Convolution(n.fire{0}_relu_squeeze1x1{1}, kernel_size=1, num_output=64,weight_filler=dict(type="xavier"))'.format(
            fire_num, suffix)
        exec 'n.fire{0}_relu_expand1x1{1} 	= L.ReLU(n.fire{0}_expand1x1{1},in_place=True)'.format(
            fire_num, suffix)
        exec 'n.fire{0}_expand3x3{1} 	= L.Convolution(n.fire{0}_relu_squeeze1x1{1}, kernel_size=3, pad=1, num_output=64,weight_filler=dict(type="xavier"))'.format(
            fire_num, suffix)
        exec 'n.fire{0}_relu_expand3x3{1} 	= L.ReLU(n.fire{0}_expand3x3{1},in_place=True)'.format(
            fire_num, suffix)
        exec 'n.fire{0}_concat{1}		= L.Concat(n.fire{0}_relu_expand1x1{1},n.fire{0}_relu_expand3x3{1})'.format(
            fire_num, suffix)

        fire_num = 9
        exec 'n.fire{0}_squeeze1x1{1} 	= L.Convolution(n.fire8_concat_cluster, kernel_size=1, num_output=16,weight_filler=dict(type="xavier"))'.format(
            fire_num, suffix)
        exec 'n.fire{0}_relu_squeeze1x1{1} 	= L.ReLU(n.fire{0}_squeeze1x1{1},in_place=True)'.format(
            fire_num, suffix)
        exec 'n.fire{0}_expand1x1{1}	= L.Convolution(n.fire{0}_relu_squeeze1x1{1}, kernel_size=1, num_output=64,weight_filler=dict(type="xavier"))'.format(
            fire_num, suffix)
        exec 'n.fire{0}_relu_expand1x1{1} 	= L.ReLU(n.fire{0}_expand1x1{1},in_place=True)'.format(
            fire_num, suffix)
        exec 'n.fire{0}_expand3x3{1} 	= L.Convolution(n.fire{0}_relu_squeeze1x1{1}, kernel_size=3, pad=1, num_output=64,weight_filler=dict(type="xavier"))'.format(
            fire_num, suffix)
        exec 'n.fire{0}_relu_expand3x3{1} 	= L.ReLU(n.fire{0}_expand3x3{1},in_place=True)'.format(
            fire_num, suffix)
        exec 'n.fire{0}_concat{1}		= L.Concat(n.fire{0}_relu_expand1x1{1},n.fire{0}_relu_expand3x3{1})'.format(
            fire_num, suffix)
        exec 'n.drop{0}{1}			= L.Dropout(n.fire{0}_concat{1}, in_place=True, dropout_ratio=0.5)'.format(
            fire_num, suffix)

        fire_num = 10
        exec 'n.conv{0}{1} 			= L.Convolution(n.fire9_concat{1}, kernel_size=1, num_output=4,weight_filler=dict(type="gaussian",mean=0.0,std=0.01))'.format(
            fire_num, suffix)
        exec 'n.conv{0}_relu{1} 		= L.ReLU(n.conv{0}{1},in_place=True)'.format(
            fire_num, suffix)
        exec 'n.pool{0}{1}			= L.Pooling(n.conv{0}_relu{1}, global_pooling=True, pool=P.Pooling.AVE)'.format(
            fire_num, suffix)
        exec 'n.softmax{1}			= L.Softmax(n.pool{0}{1})'.format(
            fire_num, suffix)

    exec 'n.final_concat			= L.Concat(n.softmax_b1,n.softmax_b2,n.softmax_b3,n.softmax_b4)'
    exec 'n.final_concat_reshape		= L.Reshape(n.final_concat, reshape_param=dict(shape=dict(dim=[0,-1,1000])))'
    #exec 'n.matvec				= L.Python()'
    #n.fc1 =   L.InnerProduct(n.pool2, num_output=500, weight_filler=dict(type='xavier'))
    #n.score = L.InnerProduct(n.relu1, num_output=10, weight_filler=dict(type='xavier'))
    #n.loss =  L.SoftmaxWithLoss(n.score, n.label)

    return n.to_proto()
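
The repeated exec'd fire blocks above could be produced by one small helper instead; a sketch under the snippet's naming conventions (this function is not in the source):

def add_fire(n, bottom, fire_num, suffix, n_squeeze, n_expand):
    # one fire module: squeeze 1x1 -> parallel expand 1x1 / expand 3x3 -> concat
    sq = L.Convolution(bottom, kernel_size=1, num_output=n_squeeze,
                       weight_filler=dict(type='xavier'))
    setattr(n, 'fire{0}_squeeze1x1{1}'.format(fire_num, suffix), sq)
    sq = L.ReLU(sq, in_place=True)
    e1 = L.ReLU(L.Convolution(sq, kernel_size=1, num_output=n_expand,
                              weight_filler=dict(type='xavier')), in_place=True)
    e3 = L.ReLU(L.Convolution(sq, kernel_size=3, pad=1, num_output=n_expand,
                              weight_filler=dict(type='xavier')), in_place=True)
    cat = L.Concat(e1, e3)
    setattr(n, 'fire{0}_concat{1}'.format(fire_num, suffix), cat)
    return cat

For example, add_fire(n, n.pool1, 2, "", 16, 64) would rebuild the fire2 block at the top of the snippet.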
Code Example #10
    def alexnet_bn_proto(self, batch_size, phase='TRAIN'):
        n = caffe.NetSpec()
        if phase == 'TRAIN':
            source_data = self.train_data
            mirror = True
        else:
            source_data = self.test_data
            mirror = False
        n.data, n.label = L.Data(source=source_data,
                                 backend=P.Data.LMDB,
                                 batch_size=batch_size,
                                 ntop=2,
                                 transform_param=dict(
                                     crop_size=227,
                                     mean_value=[104, 117, 123],
                                     mirror=mirror))

        n.conv1, n.conv1_bn, n.conv1_scale, n.conv1_relu = \
            factorization_conv_bn_scale_relu(n.data, num_output=96, kernel_size=11, stride=4,)  # 96x55x55
        n.pool1 = L.Pooling(n.conv1,
                            kernel_size=3,
                            stride=2,
                            pool=P.Pooling.MAX)  # 96x27x27

        n.conv2, n.conv2_bn, n.conv2_scale, n.conv2_relu = \
            factorization_conv_bn_scale_relu(n.pool1, num_output=256, kernel_size=5, pad=2)  # 256x27x27
        n.pool2 = L.Pooling(n.conv2,
                            kernel_size=3,
                            stride=2,
                            pool=P.Pooling.MAX)  # 256x13x13

        n.conv3, n.conv3_bn, n.conv3_scale, n.conv3_relu = \
            factorization_conv_bn_scale_relu(n.pool2, num_output=384, kernel_size=3, pad=1)  # 384x13x13

        n.conv4, n.conv4_bn, n.conv4_scale, n.conv4_relu = \
            factorization_conv_bn_scale_relu(n.conv3, num_output=384, kernel_size=3, pad=1)  # 384x13x13

        n.conv5, n.conv5_bn, n.conv5_scale, n.conv5_relu = \
            factorization_conv_bn_scale_relu(n.conv4, num_output=256, kernel_size=3, pad=1)  # 256x13x13
        n.pool5 = L.Pooling(n.conv5,
                            kernel_size=3,
                            stride=2,
                            pool=P.Pooling.MAX)  # 256x6x6

        n.fc6, n.relu6, n.drop6 = fc_relu_drop(n.pool5,
                                               num_output=2048)  # 2048x1x1
        n.fc7, n.relu7, n.drop7 = fc_relu_drop(n.fc6,
                                               num_output=2048)  # 2048x1x1
        n.fc8 = L.InnerProduct(n.fc7,
                               num_output=self.classifier_num,
                               param=[
                                   dict(lr_mult=1, decay_mult=1),
                                   dict(lr_mult=2, decay_mult=0)
                               ],
                               weight_filler=dict(type='gaussian', std=0.01),
                               bias_filler=dict(type='constant', value=0))
        if phase != 'TRAIN':
            n.accuracy_top1 = L.Accuracy(n.fc8, n.label, include=dict(phase=1))
            n.accuracy_top5 = L.Accuracy(n.fc8,
                                         n.label,
                                         include=dict(phase=1),
                                         accuracy_param=dict(top_k=5))
        n.loss = L.SoftmaxWithLoss(n.fc8, n.label)

        return n.to_proto()
Code Example #11
def InceptionResNetV2(train_lmdb,
                      test_lmdb,
                      input_size=299,
                      batch_size=256,
                      stages=[0, 5, 10, 5],
                      first_output=32,
                      include_acc=False):
    # NOTE: this code path cannot attach per-phase include rules via to_proto(), so only
    # the last data layer bound below survives and the generated net ends up with a
    # single TEST-phase input
    data, label = L.Data(source=train_lmdb,
                         backend=P.Data.LMDB,
                         batch_size=batch_size,
                         ntop=2,
                         transform_param=dict(crop_size=input_size,
                                              mean_value=[104, 117, 123],
                                              mirror=True),
                         include=dict(phase=getattr(caffe_pb2, 'TRAIN')))
    data, label = L.Data(source=test_lmdb,
                         backend=P.Data.LMDB,
                         batch_size=batch_size,
                         ntop=2,
                         transform_param=dict(crop_size=input_size,
                                              mean_value=[104, 117, 123],
                                              mirror=True),
                         include=dict(phase=getattr(caffe_pb2, 'TEST')))
    data, label = L.MemoryData(batch_size=batch_size,
                               height=input_size,
                               width=input_size,
                               channels=3,
                               ntop=2,
                               transform_param=dict(mean_value=[104, 117, 123],
                                                    mirror=True),
                               include=dict(phase=getattr(caffe_pb2, 'TEST')))

    Inception_ResNet_A_input = stem(bottom=data,
                                    conv1_num=32,
                                    conv2_num=32,
                                    conv3_num=64,
                                    conv4_num=96,
                                    conv5_num=64,
                                    conv6_num=96,
                                    conv7_num=64,
                                    conv8_num=64,
                                    conv9_num=64,
                                    conv10_num=96,
                                    conv11_num=192)
    for i in xrange(stages[1]):
        Inception_ResNet_A_input = Inception_ResNet_A(
            bottom=Inception_ResNet_A_input,
            bottom_size=384,
            num1x1=32,
            num3x3=48,
            num3x3double=64)

    Inception_ResNet_B_input = ReductionA(bottom=Inception_ResNet_A_input,
                                          num1x1_k=256,
                                          num3x3_l=256,
                                          num3x3_n=384,
                                          num3x3_m=384)

    for i in xrange(stages[2]):
        Inception_ResNet_B_input = Inception_ResNet_B(
            bottom=Inception_ResNet_B_input,
            bottom_size=1152,
            num1x1=192,
            num1x1double=128,
            num7x1=160,
            num1x7=192)

    Inception_ResNet_C_input = ReductionB(bottom=Inception_ResNet_B_input,
                                          num1x1=256,
                                          num3x3=384,
                                          num3x3double=288,
                                          num3x3three=320)

    for i in xrange(stages[3]):
        Inception_ResNet_C_input = Inception_ResNet_C(
            bottom=Inception_ResNet_C_input,
            bottom_size=2144,
            num1x1=192,
            num1x3=224,
            num3x1=256)

    glb_pool = L.Pooling(Inception_ResNet_C_input,
                         pool=P.Pooling.AVE,
                         global_pooling=True)
    dropout = L.Dropout(glb_pool, dropout_ratio=0.2)
    fc = L.InnerProduct(dropout, num_output=1000)
    loss = L.SoftmaxWithLoss(fc, label)
    acc = L.Accuracy(fc, label, include=dict(phase=getattr(caffe_pb2, 'TEST')))
    return to_proto(loss, acc)
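
A minimal usage sketch (the LMDB paths and output file name are assumptions, not from the original project): build the net and serialize it to a prototxt file.

if __name__ == '__main__':
    proto = InceptionResNetV2('examples/imagenet/ilsvrc12_train_lmdb',
                              'examples/imagenet/ilsvrc12_val_lmdb')
    with open('inception_resnet_v2_train_test.prototxt', 'w') as f:
        f.write(str(proto))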
Code example #12
File: create_net.py Project: Liu-hongwen/caffe-net
def create_net(lmdb, mean_file, batch_size, include_acc=False):
    # network specification
    net = caffe.NetSpec()

    net.data, net.label = L.Data(source=lmdb,
                                 backend=P.Data.LMDB,
                                 batch_size=batch_size,
                                 ntop=2,
                                 transform_param=dict(crop_size=227,
                                                      mean_file=mean_file,
                                                      mirror=True))

    net.conv1 = L.Convolution(
        net.data,
        num_output=96,
        kernel_size=11,
        stride=4,
        param=[dict(lr_mult=1, decay_mult=1),
               dict(lr_mult=2, decay_mult=0)],
        weight_filler=dict(type="gaussian", std=0.01),
        bias_filler=dict(type="constant", value=0))

    net.relu1 = L.ReLU(net.conv1, in_place=True)

    net.norm1 = L.LRN(net.conv1, local_size=5, alpha=0.0001, beta=0.75)

    net.pool1 = L.Pooling(net.norm1,
                          pool=P.Pooling.MAX,
                          kernel_size=3,
                          stride=2)

    net.conv2 = L.Convolution(
        net.pool1,
        num_output=256,
        pad=2,
        kernel_size=5,
        group=2,
        param=[dict(lr_mult=1, decay_mult=1),
               dict(lr_mult=2, decay_mult=0)],
        weight_filler=dict(type="gaussian", std=0.01),
        bias_filler=dict(type="constant", value=0.1))

    net.relu2 = L.ReLU(net.conv2, in_place=True)

    net.norm2 = L.LRN(net.conv2, local_size=5, alpha=0.0001, beta=0.75)

    net.pool2 = L.Pooling(net.norm2,
                          pool=P.Pooling.MAX,
                          kernel_size=3,
                          stride=2)

    net.conv3 = L.Convolution(
        net.pool2,
        num_output=384,
        pad=1,
        kernel_size=3,
        param=[dict(lr_mult=1, decay_mult=1),
               dict(lr_mult=2, decay_mult=0)],
        weight_filler=dict(type="gaussian", std=0.01),
        bias_filler=dict(type="constant", value=0))

    net.relu3 = L.ReLU(net.conv3, in_place=True)

    net.conv4 = L.Convolution(
        net.conv3,
        num_output=384,
        pad=1,
        kernel_size=3,
        group=2,
        param=[dict(lr_mult=1, decay_mult=1),
               dict(lr_mult=2, decay_mult=0)],
        weight_filler=dict(type="gaussian", std=0.01),
        bias_filler=dict(type="constant", value=0.1))

    net.relu4 = L.ReLU(net.conv4, in_place=True)

    net.conv5 = L.Convolution(
        net.conv4,
        num_output=256,
        pad=1,
        kernel_size=3,
        group=2,
        param=[dict(lr_mult=1, decay_mult=1),
               dict(lr_mult=2, decay_mult=0)],
        weight_filler=dict(type="gaussian", std=0.01),
        bias_filler=dict(type="constant", value=0.1))

    net.relu5 = L.ReLU(net.conv5, in_place=True)

    net.pool5 = L.Pooling(net.conv5,
                          pool=P.Pooling.MAX,
                          kernel_size=3,
                          stride=2)

    net.fc6 = L.InnerProduct(
        net.pool5,
        num_output=4096,
        param=[dict(lr_mult=1, decay_mult=1),
               dict(lr_mult=2, decay_mult=0)],
        weight_filler=dict(type="gaussian", std=0.005),
        bias_filler=dict(type="constant", value=0.1))

    net.relu6 = L.ReLU(net.fc6, in_place=True)

    net.drop6 = L.Dropout(net.fc6, dropout_ratio=0.5, in_place=True)

    net.fc7 = L.InnerProduct(
        net.fc6,
        num_output=4096,
        param=[dict(lr_mult=1, decay_mult=1),
               dict(lr_mult=2, decay_mult=0)],
        weight_filler=dict(type="gaussian", std=0.005),
        bias_filler=dict(type="constant", value=0.1))

    net.relu7 = L.ReLU(net.fc7, in_place=True)

    net.drop7 = L.Dropout(net.fc7, dropout_ratio=0.5, in_place=True)

    net.fc8 = L.InnerProduct(
        net.fc7,
        num_output=1000,
        param=[dict(lr_mult=1, decay_mult=1),
               dict(lr_mult=2, decay_mult=0)],
        weight_filler=dict(type="gaussian", std=0.01),
        bias_filler=dict(type="constant", value=0.1))

    net.loss = L.SoftmaxWithLoss(net.fc8, net.label)

    if include_acc:
        net.acc = L.Accuracy(net.fc8, net.label)
        return net.to_proto()

    return net.to_proto()
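
A short usage sketch (the LMDB path, mean file and output name are assumptions for illustration): write the generated AlexNet-style definition to disk.

with open('alexnet_train.prototxt', 'w') as f:
    f.write(str(create_net('examples/imagenet/ilsvrc12_train_lmdb',
                           'mean.binaryproto', batch_size=256)))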
Code example #13
File: nin.py Project: zlmzju/caffe-model
 def nin_bn_proto(self, batch_size, phase='TRAIN'):
     n = caffe.NetSpec()
     if phase == 'TRAIN':
         source_data = self.train_data
         mirror = True
     else:
         source_data = self.test_data
         mirror = False
     n.data, n.label = L.Data(source=source_data,
                              backend=P.Data.LMDB,
                              batch_size=batch_size,
                              ntop=2,
                              transform_param=dict(
                                  crop_size=227,
                                  mean_value=[104, 117, 123],
                                  mirror=mirror))
     n.conv1, n.conv1_bn, n.relu0, n.cccp1, n.cccp1_bn, n.relu1, n.cccp2, n.cccp2_bn, n.relu2 = \
         conv_bn_stack_3(n.data, dict(num_output=[96, 96, 96], kernel_size=[11, 1, 1], stride=[4, 1, 1],
                                      pad=[0, 0, 0], group=[1, 1, 1],
                                      weight_type=['gaussian', 'gaussian', 'gaussian'],
                                      weight_std=[0.01, 0.05, 0.05],
                                      bias_type=['constant', 'constant', 'constant'], bias_value=[0, 0, 0]))
     n.pool1 = L.Pooling(n.cccp2_bn,
                         pool=P.Pooling.MAX,
                         kernel_size=3,
                         stride=2)
     n.conv2, n.conv2_bn, n.relu3, n.cccp3, n.cccp3_bn, n.relu4, n.cccp4, n.cccp4_bn, n.relu5 = \
         conv_bn_stack_3(n.pool1, dict(num_output=[256, 256, 256], kernel_size=[5, 1, 1], stride=[1, 1, 1],
                                       pad=[2, 0, 0], group=[1, 1, 1],
                                       weight_type=['gaussian', 'gaussian', 'gaussian'],
                                       weight_std=[0.05, 0.05, 0.05],
                                       bias_type=['constant', 'constant', 'constant'], bias_value=[0, 0, 0]))
     n.pool2 = L.Pooling(n.cccp4_bn,
                         pool=P.Pooling.MAX,
                         kernel_size=3,
                         stride=2)
     n.conv3, n.conv3_bn, n.relu6, n.cccp5, n.cccp5_bn, n.relu7, n.cccp6, n.cccp6_bn, n.relu8 = \
         conv_bn_stack_3(n.pool2, dict(num_output=[384, 384, 384], kernel_size=[3, 1, 1], stride=[2, 1, 1],
                                       pad=[1, 0, 0], group=[1, 1, 1],
                                       weight_type=['gaussian', 'gaussian', 'gaussian'],
                                       weight_std=[0.01, 0.05, 0.05],
                                       bias_type=['constant', 'constant', 'constant'], bias_value=[0, 0, 0]))
     n.pool3 = L.Pooling(n.cccp6_bn,
                         pool=P.Pooling.MAX,
                         kernel_size=3,
                         stride=2)
     n.drop7 = L.Dropout(n.pool3,
                         in_place=True,
                         dropout_param=dict(dropout_ratio=0.5))
     n.conv4, n.conv4_bn, n.relu9, n.cccp7, n.cccp7_bn, n.relu10, n.cccp8, n.cccp8_bn, n.relu11 = \
         conv_bn_stack_3(n.pool3, dict(num_output=[1024, 1024, 1024], kernel_size=[3, 1, 1], stride=[1, 1, 1],
                                       pad=[1, 0, 0], group=[1, 1, 1],
                                       weight_type=['gaussian', 'gaussian', 'gaussian'],
                                       weight_std=[0.01, 0.05, 0.05],
                                       bias_type=['constant', 'constant', 'constant'], bias_value=[0, 0, 0]))
     n.pool4 = L.Pooling(n.cccp8_bn,
                         pool=P.Pooling.MAX,
                         kernel_size=6,
                         stride=1)
     if phase == 'TRAIN':
         n.loss = L.SoftmaxWithLoss(n.pool4, n.label)
     else:
         n.accuracy_top1, n.accuracy_top5 = accuracy_top1_top5(
             n.pool4, n.label)
     return n.to_proto()
Code example #14
        num_classes=20,
        coords=4,
        confidence_threshold=0.01,
        nms_threshold=.45,
        biases=[1.08, 1.19, 3.42, 4.41, 6.63, 11.38, 9.42, 5.11, 16.62, 10.52],
        include={'phase': caffe.TEST})


if __name__ == '__main__':
    net = caffe.NetSpec()
    #net.data = L.Input(shape=[dict(dim=[1, 3, 224, 224])])
    net.data, net.label = L.Data(
        name='data',
        batch_size=32,
        backend=P.Data.LMDB,
        source='examples/imagenet/ilsvrc12_train_lmdb',
        transform_param=dict(crop_size=227,
                             mean_value=[127.5, 127.5, 127.5],
                             scale=1 / 127.5),
        ntop=2,
        include={'phase': caffe.TRAIN})
    net.test_data, net.test_label = L.Data(
        name='data',
        batch_size=25,
        backend=P.Data.LMDB,
        source='examples/imagenet/ilsvrc12_val_lmdb',
        transform_param=dict(crop_size=227,
                             mean_value=[127.5, 127.5, 127.5],
                             scale=1 / 127.5),
        ntop=2,
        top=['data', 'label'],
        include={'phase': caffe.TEST})
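
The __main__ block stops after defining the data layers; a typical closing step (the file name is an assumption) would serialize the spec:

    with open('train_val.prototxt', 'w') as f:
        f.write(str(net.to_proto()))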
Code example #15
def fcn(split,fname_img_lmdb,fname_map_lmdb,learn_all=False):
    
    param = learned_param if learn_all else frozen_param
    
    n = caffe.NetSpec()
    
    # invoke LMDB data loader
    n.data = L.Data(batch_size=1, backend=P.Data.LMDB, source=fname_img_lmdb, ntop=1)
    n.label = L.Data(batch_size=1, backend=P.Data.LMDB, source=fname_map_lmdb, ntop=1)

    # the base net
    n.conv1_1, n.relu1_1 = conv_relu(n.data, 64, pad=100, param=param)
    n.conv1_2, n.relu1_2 = conv_relu(n.relu1_1, 64, param=param)
    n.pool1 = max_pool(n.relu1_2)

    n.conv2_1, n.relu2_1 = conv_relu(n.pool1, 128, param=param)
    n.conv2_2, n.relu2_2 = conv_relu(n.relu2_1, 128, param=param)
    n.pool2 = max_pool(n.relu2_2)

    n.conv3_1, n.relu3_1 = conv_relu(n.pool2, 256, param=param)
    n.conv3_2, n.relu3_2 = conv_relu(n.relu3_1, 256, param=param)
    n.conv3_3, n.relu3_3 = conv_relu(n.relu3_2, 256, param=param)
    n.pool3 = max_pool(n.relu3_3)

    n.conv4_1, n.relu4_1 = conv_relu(n.pool3, 512, param=param)
    n.conv4_2, n.relu4_2 = conv_relu(n.relu4_1, 512, param=param)
    n.conv4_3, n.relu4_3 = conv_relu(n.relu4_2, 512, param=param)
    n.pool4 = max_pool(n.relu4_3)

    n.conv5_1, n.relu5_1 = conv_relu(n.pool4, 512, param=param)
    n.conv5_2, n.relu5_2 = conv_relu(n.relu5_1, 512, param=param)
    n.conv5_3, n.relu5_3 = conv_relu(n.relu5_2, 512, param=param)
    n.pool5 = max_pool(n.relu5_3)

    # fully conv
    n.fc6, n.relu6 = conv_relu(n.pool5, 4096, ks=7, pad=0, param=param)
    n.drop6 = L.Dropout(n.relu6, dropout_ratio=0.5, in_place=True)
    n.fc7, n.relu7 = conv_relu(n.drop6, 4096, ks=1, pad=0, param=param)
    n.drop7 = L.Dropout(n.relu7, dropout_ratio=0.5, in_place=True)
    
    # the parameters of the following layers are learned in any case;
    # they are named score_sal / upscore_sal so pretrained weights are not copied onto them
    n.score_sal = L.Convolution(n.drop7, num_output=1, kernel_size=1, pad=0,
                                param=learned_param) # <- learning weights for this layer

    # CHANGES DUE TO SKIP CONNECTION:
    # don't upscale all the way, only to the previous layer
    # replaced kernel_size=64, stride=32 with:
    n.upscore_sal2 = L.Deconvolution(n.score_sal,
        convolution_param=dict(num_output=1, kernel_size=4, stride=2,
            bias_term=False), param=[dict(lr_mult=0)])  # don't learn upscoring; fix it as bilinear

    n.score_pool4 = L.Convolution(n.pool4, num_output=1, kernel_size=1, pad=0, param=learned_param) # <- learning weights for this layer
    n.score_pool4c = crop(n.score_pool4, n.upscore_sal2)
    n.fuse_pool4 = L.Eltwise(n.upscore_sal2, n.score_pool4c, operation=P.Eltwise.SUM)
    n.upscore16 = L.Deconvolution(n.fuse_pool4,
        convolution_param=dict(num_output=1, kernel_size=32, stride=16,
            bias_term=False), param=[dict(lr_mult=0)])
    # don't learn any of the upscaling (deconvolution) filters - just fix at bilinear

    # n.score = crop(n.upscore_sal, n.data)
    n.score = crop(n.upscore16, n.data)

    n.loss = L.SigmoidCrossEntropyLoss(n.score, n.label, loss_weight=1) 

    return n.to_proto()
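
Because the two Deconvolution layers above are frozen with lr_mult=0, their weights must be filled in by net surgery after the net is instantiated, or they stay at zero. A minimal sketch, reusing the standard bilinear kernel from the FCN reference code (applying it to these two layer names is an assumption):

import numpy as np

def upsample_filt(size):
    # standard 2-D bilinear interpolation kernel
    factor = (size + 1) // 2
    center = factor - 1 if size % 2 == 1 else factor - 0.5
    og = np.ogrid[:size, :size]
    return ((1 - abs(og[0] - center) / factor) *
            (1 - abs(og[1] - center) / factor))

def fill_bilinear(net, layer_names):
    # copy a bilinear kernel into each fixed deconvolution layer;
    # assumes out_channels == in_channels (true here: both are 1)
    for name in layer_names:
        w = net.params[name][0].data  # (out_ch, in_ch, kh, kw)
        w[range(w.shape[0]), range(w.shape[1]), :, :] = upsample_filt(w.shape[-1])

# e.g. after building the solver:
#   fill_bilinear(solver.net, ['upscore_sal2', 'upscore16'])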
Code example #16
    def resnet_layers_proto(self,
                            batch_size,
                            phase='TRAIN',
                            stages=(3, 4, 6, 3)):
        n = caffe.NetSpec()
        if phase == 'TRAIN':
            source_data = self.train_data
            need_mirror = True
        else:
            source_data = self.test_data
            need_mirror = False
        n.data, n.label = L.Data(source=source_data,
                                 backend=P.Data.LMDB,
                                 batch_size=batch_size,
                                 ntop=2,
                                 transform_param=dict(
                                     crop_size=224,
                                     mean_value=[128, 128, 128],
                                     mirror=need_mirror))

        n.conv1, n.conv1_bn, n.conv1_scale, n.conv1_relu = \
            block_conv_bn_scale_relu(n.data, num_output=64, kernel_size=7, stride=2, pad=3)  # 64x112x112
        n.pool1 = L.Pooling(n.conv1,
                            kernel_size=3,
                            stride=2,
                            pool=P.Pooling.MAX)

        residual_num = 0
        for num in xrange(len(stages)):
            for i in xrange(stages[num]):
                residual_num = residual_num + 1

                if num == 0 and i == 0:
                    stage_string = skip_connect_with_dimen_match_no_patch_reduce
                    if residual_num == 1:
                        bottom_string = 'n.pool1'
                    else:
                        bottom_string = 'n.res%s_eletwise' % (
                            str(residual_num - 1))
                elif i == 0 and num > 0:
                    stage_string = skip_connect_with_dimen_match
                    if residual_num == 1:
                        bottom_string = 'n.pool1'
                    else:
                        bottom_string = 'n.res%s_eletwise' % (
                            str(residual_num - 1))
                else:
                    stage_string = skip_connect_no_dimen_match
                    bottom_string = 'n.res%s_eletwise' % (str(residual_num -
                                                              1))
                exec(
                    stage_string.replace('(stage)', str(residual_num)).replace(
                        '(bottom)',
                        bottom_string).replace('(num)', str(2**num * 64)))

        exec('n.pool5 = L.Pooling(bottom_string, kernel_size=7, stride=1, pool=P.Pooling.AVE)'
             .replace('bottom_string', 'n.res%s_eletwise' % str(residual_num)))

        n.classifier = L.InnerProduct(n.pool5,
                                      num_output=self.classifier_num,
                                      param=[
                                          dict(lr_mult=1, decay_mult=1),
                                          dict(lr_mult=2, decay_mult=0)
                                      ],
                                      weight_filler=dict(type='xavier'),
                                      bias_filler=dict(type='constant',
                                                       value=0))
        n.loss = L.SoftmaxWithLoss(n.classifier, n.label)
        if phase != 'TRAIN':
            n.accuracy_top1 = L.Accuracy(n.classifier,
                                         n.label,
                                         include=dict(phase=1))
            n.accuracy_top5 = L.Accuracy(n.classifier,
                                         n.label,
                                         include=dict(phase=1),
                                         accuracy_param=dict(top_k=5))

        return n.to_proto()
Code example #17
def segnet_network(data_source, label_source, mode='train'):
    """ Builds a Caffe Network Definition object for SegNet

    Args:
        data_source (str): path to the data LMDB
        label_source (str): path to the label LMDB
        mode (str, optional): 'train', 'test' or 'deploy' (defaults to 'train')

    Returns:
        obj: SegNet (Caffe Network Definition object)
    """
    n = caffe.NetSpec()
    if MEAN_PIXEL is None:
        transform_param = {}
    else:
        transform_param = {'mean_value': MEAN_PIXEL}

    if mode == 'deploy':
        n.data = L.Input(input_param={ 'shape':\
            { 'dim': [BATCH_SIZE, 3, test_patch_size[0], test_patch_size[1]] }
        })
    else:
        n.data = L.Data(batch_size=BATCH_SIZE, backend=P.Data.LMDB,\
                    transform_param=transform_param, source=data_source)
        n.label = L.Data(batch_size=BATCH_SIZE, backend=P.Data.LMDB, source=label_source)

    convolution_block(n, n.data, "conv1_{}", 2, planes=(64,64,64), lr_mult=0.5)
    n.pool1, n.pool1_mask = L.Pooling(n.conv1_2, pool=P.Pooling.MAX, kernel_size=2, stride=2, ntop=2)

    convolution_block(n, n.pool1, "conv2_{}", 2, planes=(128,128,128), lr_mult=0.5)
    n.pool2, n.pool2_mask = L.Pooling(n.conv2_2, pool=P.Pooling.MAX, kernel_size=2, stride=2, ntop=2)

    convolution_block(n, n.pool2, "conv3_{}", 3, planes=(256,256,256), lr_mult=0.5)
    n.pool3, n.pool3_mask = L.Pooling(n.conv3_3, pool=P.Pooling.MAX, kernel_size=2, stride=2, ntop=2)

    convolution_block(n, n.pool3, "conv4_{}", 3, planes=(512,512,512), lr_mult=0.5)
    n.pool4, n.pool4_mask = L.Pooling(n.conv4_3, pool=P.Pooling.MAX, kernel_size=2, stride=2, ntop=2)

    convolution_block(n, n.pool4, "conv5_{}", 3, planes=(512,512,512), lr_mult=0.5)
    n.pool5, n.pool5_mask = L.Pooling(n.conv5_3, pool=P.Pooling.MAX, kernel_size=2, stride=2, ntop=2)

    n.upsample5 = L.Upsample(n.pool5, n.pool5_mask, scale=2)
    convolution_block(n, n.upsample5, "conv5_{}_D", 3, planes=(512,512,512), lr_mult=1, reverse=True)

    n.upsample4 = L.Upsample(n.conv5_1_D, n.pool4_mask, scale=2)
    convolution_block(n, n.upsample4, "conv4_{}_D", 3, planes=(512,512,256), lr_mult=1, reverse=True)

    n.upsample3 = L.Upsample(n.conv4_1_D, n.pool3_mask, scale=2)
    convolution_block(n, n.upsample3, "conv3_{}_D", 3, planes=(256,256,128), lr_mult=1, reverse=True)

    n.upsample2 = L.Upsample(n.conv3_1_D, n.pool2_mask, scale=2)
    convolution_block(n, n.upsample2, "conv2_{}_D", 2, planes=(128,128,64), lr_mult=1, reverse=True)

    n.upsample1 = L.Upsample(n.conv2_1_D, n.pool1_mask, scale=2)
    n.conv1_2_D, n.conv1_2_D_bn, n.conv1_2_D_scale, n.conv1_2_D_relu =\
                                convolution_unit(n.upsample1, 3, 1, 64, lr_mult=1)
    n.conv1_1_D, _, _, _ = convolution_unit(n.conv1_2_D, 3, 1, 6, lr_mult=1)

    if mode == 'train' or mode == 'test':
        n.loss = L.SoftmaxWithLoss(n.conv1_1_D, n.label, loss_param={'ignore_label': IGNORE_LABEL})
        n.accuracy = L.Accuracy(n.conv1_1_D, n.label)
    return n
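
A usage sketch (the LMDB paths and output file names are assumptions): build the network once per mode and write each resulting prototxt. Note that the function returns the NetSpec itself, so the caller calls to_proto().

for mode in ('train', 'test', 'deploy'):
    spec = segnet_network('data_lmdb', 'label_lmdb', mode=mode)
    with open('segnet_%s.prototxt' % mode, 'w') as f:
        f.write(str(spec.to_proto()))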
Code example #18
    'test_interval': 2000,
}

# Check file.
check_if_exist(train_data)
check_if_exist(test_data)
make_if_not_exist(save_dir)
make_if_not_exist(job_dir)
make_if_not_exist(snapshot_dir)

# Create train net
train_net = caffe.NetSpec()
train_net.data, train_net.label = L.Data(
    source=train_data,
    backend=P.Data.LMDB,
    batch_size=train_batch_size,
    ntop=2,
    transform_param=dict(crop_size=crop_size, mean_file=mean_file,
                         mirror=True),
    include=dict(phase=caffe_pb2.Phase.Value('TRAIN')))

ZFNetBody(train_net, from_layer='data', for_training=True)

with open(train_net_file, 'w') as f:
    print('name: "{}_train"'.format(model_name), file=f)
    print(train_net.to_proto(), file=f)
shutil.copy(train_net_file, job_dir)

# Create test net
test_net = caffe.NetSpec()
test_net.data, test_net.label = L.Data(
    source=test_data,
Code example #19
def gen_net(lmdb, batch_size):
    # our version of LeNet: a series of linear and simple nonlinear transformations
    n = caffe.NetSpec()
    n.data, n.label = L.Data(batch_size=batch_size, backend=P.Data.LMDB, source=lmdb,
                             transform_param=dict(scale=1./255), ntop=2)
    return n.to_proto()
Code example #20
def vgg_16(lmdb, bs_train=32, bs_val=10, lmdb_flag=True, not_deploy=True):
    n = caffe.NetSpec()
    if not_deploy:
        if lmdb_flag:
            n.data, n.label = L.Data(source=lmdb + 'cub200_2011_train_lmdb',
                                     backend=P.Data.LMDB,
                                     include=dict(phase=caffe_pb2.TRAIN),
                                     batch_size=bs_train,
                                     ntop=2,
                                     transform_param=dict(
                                         crop_size=224,
                                         mean_value=[110, 127, 123],
                                         mirror=True))
            data_str = n.to_proto()
            n.data, n.label = L.Data(source=lmdb + 'cub200_2011_val_lmdb',
                                     backend=P.Data.LMDB,
                                     include=dict(phase=caffe_pb2.TEST),
                                     batch_size=bs_val,
                                     ntop=2,
                                     transform_param=dict(
                                         crop_size=224,
                                         mean_value=[110, 127, 123],
                                         mirror=False))
        else:
            n.data, n.label = L.Data(source=lmdb + 'cub200_2011_train_leveldb',
                                     backend=P.Data.LEVELDB,
                                     include=dict(phase=caffe_pb2.TRAIN),
                                     batch_size=bs_train,
                                     ntop=2,
                                     transform_param=dict(
                                         crop_size=224,
                                         mean_value=[110, 127, 123],
                                         mirror=True))
            data_str = n.to_proto()
            n.data, n.label = L.Data(source=lmdb + 'cub200_2011_val_leveldb',
                                     backend=P.Data.LEVELDB,
                                     include=dict(phase=caffe_pb2.TEST),
                                     batch_size=bs_val,
                                     ntop=2,
                                     transform_param=dict(
                                         crop_size=224,
                                         mean_value=[110, 127, 123],
                                         mirror=False))
    else:
        data_str = 'input: "data"\ninput_dim: 1\ninput_dim: 3\ninput_dim: 224\ninput_dim: 224'
        n.data = L.Data()

    # the net itself
    n.conv1_1, n.relu1_1 = conv_relu(n.data, nout=64, pad=1, ks=3)
    n.conv1_2, n.relu1_2 = conv_relu(n.relu1_1, nout=64, pad=1, ks=3)
    n.pool1 = max_pool(n.relu1_2, ks=2, stride=2)

    n.conv2_1, n.relu2_1 = conv_relu(n.pool1, nout=128, pad=1, ks=3)
    n.conv2_2, n.relu2_2 = conv_relu(n.relu2_1, nout=128, pad=1, ks=3)
    n.pool2 = max_pool(n.relu2_2, ks=2, stride=2)

    n.conv3_1, n.relu3_1 = conv_relu(n.pool2, nout=256, pad=1, ks=3)
    n.conv3_2, n.relu3_2 = conv_relu(n.relu3_1, nout=256, pad=1, ks=3)
    n.conv3_3, n.relu3_3 = conv_relu(n.relu3_2, nout=256, pad=1, ks=3)
    n.pool3 = max_pool(n.relu3_3, ks=2, stride=2)

    n.conv4_1, n.relu4_1 = conv_relu(n.pool3, nout=512, pad=1, ks=3)
    n.conv4_2, n.relu4_2 = conv_relu(n.relu4_1, nout=512, pad=1, ks=3)
    n.conv4_3, n.relu4_3 = conv_relu(n.relu4_2, nout=512, pad=1, ks=3)
    n.pool4 = max_pool(n.relu4_3, ks=2, stride=2)

    n.conv5_1, n.relu5_1 = conv_relu(n.pool4, nout=512, pad=1, ks=3)
    n.conv5_2, n.relu5_2 = conv_relu(n.relu5_1, nout=512, pad=1, ks=3)
    n.conv5_3, n.relu5_3 = conv_relu(n.relu5_2, nout=512, pad=1, ks=3)
    n.pool5 = ave_pool(n.relu5_3, ks=14, stride=1)

    n.softmax = L.Convolution(n.pool5,
                              kernel_size=1,
                              num_output=200,
                              param=[
                                  dict(lr_mult=10, decay_mult=10),
                                  dict(lr_mult=20, decay_mult=0)
                              ])
    if not_deploy:
        n.loss = L.SoftmaxWithLoss(n.softmax, n.label)
        n.acc_top_1 = L.Accuracy(n.softmax, n.label, top_k=1)
    else:
        n.prob = L.Softmax(n.softmax)
    model_str = str(n.to_proto())
    if not not_deploy:
        model_str = model_str[54:-1]
    return str(data_str) + '\n' + model_str
Code example #21
    net.relu1 = L.ReLU(net.ip1, in_place=True)
    net.ip2 = L.InnerProduct(net.relu1, name='ip2', num_output=10, **kwargs)

caffe_root = '../../'
# caffe_root = 'E:/Code/Github/windows_caffe/'
model_root = caffe_root + 'models/mnist/'

# training data
train_data = caffe_root + "data/mnist/mnist_train_lmdb"
# test data
test_data = caffe_root + "data/mnist/mnist_test_lmdb"

# training network
train_net = caffe.NetSpec()  # base network spec
# data input layer with labels
train_net.data, train_net.label = L.Data(source=train_data, backend=P.Data.LMDB, batch_size=64, ntop=2,
                                         transform_param=dict(scale=0.00390625))
# build the LeNet-5 body
lenet5_body(train_net, 'data')
# add the softmax loss layer
train_net.loss = L.SoftmaxWithLoss(train_net.ip2, train_net.label)

# test network
test_net = caffe.NetSpec()  # base network spec
# data input layer with labels
test_net.data, test_net.label = L.Data(source=test_data, batch_size=100, backend=P.Data.LMDB, ntop=2,
                                       transform_param=dict(scale=0.00390625))
# build the LeNet-5 body
lenet5_body(test_net, 'data')
# add the softmax loss layer
test_net.loss = L.SoftmaxWithLoss(test_net.ip2, test_net.label)
# add an accuracy layer
test_net.accuracy = L.Accuracy(test_net.ip2, test_net.label)
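
Neither net is written to disk in this snippet; a typical closing step (file names are assumptions) serializes both definitions under model_root:

with open(model_root + 'lenet5_train.prototxt', 'w') as f:
    f.write(str(train_net.to_proto()))
with open(model_root + 'lenet5_test.prototxt', 'w') as f:
    f.write(str(test_net.to_proto()))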
Code example #22
def construct_fcn(image_lmdb, contour_lmdb, batch_size=1, include_acc=False):
    net = caffe.NetSpec()

    # args for convlution layers
    weight_filler = dict(type='gaussian', mean=0.0, std=0.01)
    bias_filler = dict(type='constant', value=0.1)
    param = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)]

    net.data = L.Data(source=image_lmdb,
                      backend=P.Data.LMDB,
                      batch_size=batch_size,
                      ntop=1,
                      transform_param=dict(crop_size=0,
                                           mean_value=[77],
                                           mirror=False))
    net.label = L.Data(source=contour_lmdb,
                       backend=P.Data.LMDB,
                       batch_size=batch_size,
                       ntop=1)
    # conv-relu-pool 1
    net.conv1 = L.Convolution(net.data,
                              kernel_size=5,
                              stride=2,
                              num_output=100,
                              pad=50,
                              group=1,
                              weight_filler=weight_filler,
                              bias_filler=bias_filler,
                              param=param)
    net.relu1 = L.ReLU(net.conv1, in_place=True)
    net.pool1 = L.Pooling(net.relu1,
                          pool=P.Pooling.MAX,
                          kernel_size=2,
                          stride=2)
    # conv-relu-pool 2
    net.conv2 = L.Convolution(net.pool1,
                              kernel_size=5,
                              stride=2,
                              num_output=200,
                              pad=0,
                              group=1,
                              weight_filler=weight_filler,
                              bias_filler=bias_filler,
                              param=param)
    net.relu2 = L.ReLU(net.conv2, in_place=True)
    net.pool2 = L.Pooling(net.relu2,
                          pool=P.Pooling.MAX,
                          kernel_size=2,
                          stride=2)
    net.conv3 = L.Convolution(net.pool2,
                              kernel_size=3,
                              stride=1,
                              num_output=300,
                              pad=0,
                              group=1,
                              weight_filler=weight_filler,
                              bias_filler=bias_filler,
                              param=param)
    net.relu3 = L.ReLU(net.conv3, in_place=True)
    net.conv4 = L.Convolution(net.relu3,
                              kernel_size=3,
                              stride=1,
                              num_output=300,
                              pad=0,
                              group=1,
                              weight_filler=weight_filler,
                              bias_filler=bias_filler,
                              param=param)
    net.relu4 = L.ReLU(net.conv4, in_place=True)
    net.drop = L.Dropout(net.relu4, dropout_ratio=0.1, in_place=True)
    net.score_classes = L.Convolution(net.drop,
                                      kernel_size=1,
                                      stride=1,
                                      num_output=2,
                                      pad=0,
                                      group=1,
                                      weight_filler=weight_filler,
                                      bias_filler=bias_filler,
                                      param=param)
    # NOTE: this Deconvolution is created without a convolution_param; Caffe
    # normally requires kernel_size (and stride) here for a usable upsampling layer
    net.upscore = L.Deconvolution(net.score_classes)
    net.score = L.Crop(net.upscore, net.data)
    net.loss = L.SoftmaxWithLoss(net.score,
                                 net.label,
                                 loss_param=dict(normalize=True))
    if include_acc:
        net.accuracy = L.Accuracy(net.score, net.label)

    return net.to_proto()
Code example #23
    def inception_resnet_v2_proto(self, batch_size, phase='TRAIN'):
        n = caffe.NetSpec()
        if phase == 'TRAIN':
            source_data = self.train_data
            mirror = True
        else:
            source_data = self.test_data
            mirror = False
        n.data, n.label = L.Data(source=source_data,
                                 backend=P.Data.LMDB,
                                 batch_size=batch_size,
                                 ntop=2,
                                 transform_param=dict(
                                     crop_size=299,
                                     mean_value=[104, 117, 123],
                                     mirror=mirror))

        # stem
        n.conv1_3x3_s2, n.conv1_3x3_s2_bn, n.conv1_3x3_s2_scale, n.conv1_3x3_relu, n.conv2_3x3_s1, n.conv2_3x3_s1_bn, \
        n.conv2_3x3_s1_scale, n.conv2_3x3_relu, n.conv3_3x3_s1, n.conv3_3x3_s1_bn, n.conv3_3x3_s1_scale, n.conv3_3x3_relu, \
        n.pool1_3x3_s2, n.conv4_3x3_reduce, n.conv4_3x3_reduce_bn, n.conv4_3x3_reduce_scale, n.conv4_3x3_reduce_relu, \
        n.conv4_3x3, n.conv4_3x3_bn, n.conv4_3x3_scale, n.conv4_relu_3x3, n.pool2_3x3_s2, n.conv5_1x1, n.conv5_1x1_bn, n.conv5_1x1_scale, \
        n.conv5_1x1_relu, n.conv5_5x5_reduce, n.conv5_5x5_reduce_bn, n.conv5_5x5_reduce_scale, n.conv5_5x5_reduce_relu, \
        n.conv5_5x5, n.conv5_5x5_bn, n.conv5_5x5_scale, n.conv5_5x5_relu, n.conv5_3x3_reduce, n.conv5_3x3_reduce_bn, n.conv5_3x3_reduce_scale, \
        n.conv5_3x3_reduce_relu, n.conv5_3x3, n.conv5_3x3_bn, n.conv5_3x3_scale, n.conv5_3x3_relu, n.conv5_3x3_2, n.conv5_3x3_2_bn, \
        n.conv5_3x3_2_scale, n.conv5_3x3_2_relu, n.ave_pool, n.conv5_1x1_ave, n.conv5_1x1_ave_bn, n.conv5_1x1_ave_scale, n.conv5_1x1_ave_relu, \
        n.stem_concat = stem_resnet_v2_299x299(n.data)  # 320x35x35

        # 10 x inception_resnet_v2_a
        for i in xrange(10):
            if i == 0:
                bottom = 'n.stem_concat'
            else:
                bottom = 'n.inception_resnet_v2_a(order)_residual_eltwise'.replace(
                    '(order)', str(i))
            exec(
                string_a.replace('(order)',
                                 str(i + 1)).replace('bottom',
                                                     bottom))  # 384x35x35

        # reduction_resnet_v2_a
        n.reduction_a_3x3, n.reduction_a_3x3_bn, n.reduction_a_3x3_scale, n.reduction_a_3x3_relu, \
        n.reduction_a_3x3_2_reduce, n.reduction_a_3x3_2_reduce_bn, n.reduction_a_3x3_2_reduce_scale, \
        n.reduction_a_3x3_2_reduce_relu, n.reduction_a_3x3_2, n.reduction_a_3x3_2_bn, n.reduction_a_3x3_2_scale, \
        n.reduction_a_3x3_2_relu, n.reduction_a_3x3_3, n.reduction_a_3x3_3_bn, n.reduction_a_3x3_3_scale, \
        n.reduction_a_3x3_3_relu, n.reduction_a_pool, n.reduction_a_concat = \
            reduction_resnet_v2_a(n.inception_resnet_v2_a10_residual_eltwise)  # 1088x17x17

        # 20 x inception_resnet_v2_b
        for i in xrange(20):
            if i == 0:
                bottom = 'n.reduction_a_concat'
            else:
                bottom = 'n.inception_resnet_v2_b(order)_residual_eltwise'.replace(
                    '(order)', str(i))
            exec(
                string_b.replace('(order)',
                                 str(i + 1)).replace('bottom',
                                                     bottom))  # 1088x17x17

        # reduction_resnet_v2_b
        n.reduction_b_3x3_reduce, n.reduction_b_3x3_reduce_bn, n.reduction_b_3x3_reduce_scale, \
        n.reduction_b_3x3_reduce_relu, n.reduction_b_3x3, n.reduction_b_3x3_bn, n.reduction_b_3x3_scale, \
        n.reduction_b_3x3_relu, n.reduction_b_3x3_2_reduce, n.reduction_b_3x3_2_reduce_bn, n.reduction_b_3x3_2_reduce_scale, \
        n.reduction_b_3x3_2_reduce_relu, n.reduction_b_3x3_2, n.reduction_b_3x3_2_bn, n.reduction_b_3x3_2_scale, \
        n.reduction_b_3x3_2_relu, n.reduction_b_3x3_3_reduce, n.reduction_b_3x3_3_reduce_bn, n.reduction_b_3x3_3_reduce_scale, \
        n.reduction_b_3x3_3_reduce_relu, n.reduction_b_3x3_3, n.reduction_b_3x3_3_bn, n.reduction_b_3x3_3_scale, \
        n.reduction_b_3x3_3_relu, n.reduction_b_3x3_4, n.reduction_b_3x3_4_bn, n.reduction_b_3x3_4_scale, \
        n.reduction_b_3x3_4_relu, n.reduction_b_pool, n.reduction_b_concat = \
            reduction_resnet_v2_b(n.inception_resnet_v2_b20_residual_eltwise)  # 2080x8x8

        # 9 x inception_resnet_v2_c
        for i in xrange(9):
            if i == 0:
                bottom = 'n.reduction_b_concat'
            else:
                bottom = 'n.inception_resnet_v2_c(order)_residual_eltwise'.replace(
                    '(order)', str(i))
            exec(
                string_c.replace('(order)',
                                 str(i + 1)).replace('bottom',
                                                     bottom))  # 2080x8x8

        n.inception_resnet_v2_c10_1x1, n.inception_resnet_v2_c10_1x1_bn, n.inception_resnet_v2_c10_1x1_scale, \
        n.inception_resnet_v2_c10_1x1_relu = \
            factorization_conv_bn_scale_relu(n.inception_resnet_v2_c9_residual_eltwise, num_output=192,
                                             kernel_size=1)  # 192x8x8

        n.inception_resnet_v2_c10_1x3_reduce, n.inception_resnet_v2_c10_1x3_reduce_bn, \
        n.inception_resnet_v2_c10_1x3_reduce_scale, n.inception_resnet_v2_c10_1x3_reduce_relu = \
            factorization_conv_bn_scale_relu(n.inception_resnet_v2_c9_residual_eltwise, num_output=192,
                                             kernel_size=1)  # 192x8x8
        n.inception_resnet_v2_c10_1x3, n.inception_resnet_v2_c10_1x3_bn, n.inception_resnet_v2_c10_1x3_scale, \
        n.inception_resnet_v2_c10_1x3_relu = \
            factorization_conv_mxn(n.inception_resnet_v2_c10_1x3_reduce, num_output=224, kernel_h=1, kernel_w=3,
                                   pad_h=0, pad_w=1)  # 224x8x8
        n.inception_resnet_v2_c10_3x1, n.inception_resnet_v2_c10_3x1_bn, n.inception_resnet_v2_c10_3x1_scale, \
        n.inception_resnet_v2_c10_3x1_relu = \
            factorization_conv_mxn(n.inception_resnet_v2_c10_1x3, num_output=256, kernel_h=3, kernel_w=1, pad_h=1,
                                   pad_w=0)  # 256x8x8

        n.inception_resnet_v2_c10_concat = L.Concat(
            n.inception_resnet_v2_c10_1x1,
            n.inception_resnet_v2_c10_3x1)  # 448(192+256)x8x8
        n.inception_resnet_v2_c10_up, n.inception_resnet_v2_c10_up_bn, n.inception_resnet_v2_c10_up_scale = \
            factorization_conv_bn_scale(n.inception_resnet_v2_c10_concat, num_output=2080,
                                        kernel_size=1)  # 2080x8x8

        n.inception_resnet_v2_c10_residual_eltwise = \
            L.Eltwise(n.inception_resnet_v2_c9_residual_eltwise, n.inception_resnet_v2_c10_up,
                      eltwise_param=dict(operation=1))  # 2080x8x8

        n.conv6_1x1, n.conv6_1x1_bn, n.conv6_1x1_scale, n.conv6_1x1_relu = \
            factorization_conv_bn_scale_relu(n.inception_resnet_v2_c10_residual_eltwise, num_output=1536,
                                             kernel_size=1)  # 1536x8x8

        n.pool_8x8_s1 = L.Pooling(n.conv6_1x1,
                                  pool=P.Pooling.AVE,
                                  global_pooling=True)  # 1536x1x1
        n.pool_8x8_s1_drop = L.Dropout(n.pool_8x8_s1,
                                       dropout_param=dict(dropout_ratio=0.2))
        n.classifier = L.InnerProduct(n.pool_8x8_s1_drop,
                                      num_output=self.classifier_num,
                                      param=[
                                          dict(lr_mult=1, decay_mult=1),
                                          dict(lr_mult=2, decay_mult=0)
                                      ],
                                      weight_filler=dict(type='xavier'),
                                      bias_filler=dict(type='constant',
                                                       value=0))
        n.loss = L.SoftmaxWithLoss(n.classifier, n.label)
        if phase != 'TRAIN':
            n.accuracy_top1 = L.Accuracy(n.classifier,
                                         n.label,
                                         include=dict(phase=1))
            n.accuracy_top5 = L.Accuracy(n.classifier,
                                         n.label,
                                         include=dict(phase=1),
                                         accuracy_param=dict(top_k=5))

        return n.to_proto()
Code example #24
def densenet(mode, data_file, bs, nlayer, nclass, first_nout=16, growth_rate=16, dropout=0.2):

  net = caffe.NetSpec()

  # data layer ---------------------------------------------------------------
  mirror  = True
  shuffle = True
  if mode == 1: # TEST phase
    mirror  = False
    shuffle = False
  
  transform = dict(scale = 0.0078125,
                    mirror = mirror,
                    #crop_size = 224,
                    mean_value = [127.5, 127.5, 127.5])

  net.data, net.label = L.Data(#include = dict(phase = mode),
                              transform_param = transform,
                              source = data_file,
                              batch_size = bs, 
                              backend = P.Data.LMDB,
                              ntop = 2)
  # net.data, net.label = L.ImageData(#include = dict(phase = mode),
                                    # transform_param = transform,
                                    # source = data_file,
                                    # batch_size = bs,
                                    # shuffle = shuffle,
                                    # #new_height = 256,
                                    # #new_width = 256,
                                    # #is_color = True,
                                    # ntop = 2)

  pre_fmap = 0 # total number of previous feature maps
  
  # first convolution --------------------------------------------------------
  net.conv_1 = L.Convolution(net.data, num_output=first_nout,
                             kernel_size=7, stride=2, pad=3, 
                             weight_filler=dict(type='msra'), 
                             bias_filler=dict(type='constant'),
                             param=[dict(lr_mult=1, decay_mult=1),
                                    dict(lr_mult=2, decay_mult=0)])
  
  net.relu_1 = L.PReLU(net.conv_1, in_place=True)
  
  net.pool_1 = L.Pooling(net.relu_1, pool=P.Pooling.MAX,
                         kernel_size=3, stride=2)
  
  pre_layer = net.pool_1
  pre_fmap += first_nout
  
  # DB + TD ------------------------------------------------------------------
  # major and minor are offset by +1 so that block indices start from 1
  for major in xrange(len(nlayer)-1):
    # DB
    for minor in xrange(nlayer[major]):
      pre_layer = cat_layer(net, mode, major+1, minor+1, pre_layer, growth_rate, dropout)
      pre_fmap += growth_rate
    # TD
    pre_layer = transition_down(net, mode, major+1, pre_layer, pre_fmap, dropout)
    pre_fmap = pre_fmap // 2
  
  # last DB, without TD
  major = len(nlayer)
  for minor in xrange(nlayer[-1]):
    pre_layer = cat_layer(net, mode, major, minor+1, pre_layer, growth_rate, dropout)
    pre_fmap += growth_rate
  
  # final layers -------------------------------------------------------------
  use_global_stats = False
  if mode == 1: # TEST phase
    use_global_stats = True
  net.bn_final = L.BatchNorm(pre_layer, in_place=False, 
                             batch_norm_param = dict(use_global_stats=use_global_stats),
                             param=[dict(lr_mult=0, decay_mult=0), 
                                    dict(lr_mult=0, decay_mult=0), 
                                    dict(lr_mult=0, decay_mult=0)])
  net.scale_final = L.Scale(net.bn_final, bias_term=True, in_place=True,
                            filler=dict(value=1), bias_filler=dict(value=0))
  net.relu_final = L.PReLU(net.scale_final, in_place=True)
  net.pool_final = L.Pooling(net.relu_final, pool=P.Pooling.AVE, global_pooling=True)
  
  net.fc_class = L.InnerProduct(net.pool_final, num_output=nclass,
                                weight_filler=dict(type='xavier'), 
                                bias_filler=dict(type='constant'),
                                param=[dict(lr_mult=1, decay_mult=1),
                                       dict(lr_mult=2, decay_mult=0)])
  
  net.loss = L.SoftmaxWithLoss(net.fc_class, net.label)
  
  if mode == 1:
    net.accuracy = L.Accuracy(net.fc_class, net.label)
  
  return str(net.to_proto())
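
A usage sketch (the LMDB path and output name are assumptions; nlayer=[6, 12, 24, 16] is the DenseNet-121 block configuration):

proto_str = densenet(mode=0, data_file='train_lmdb', bs=64,
                     nlayer=[6, 12, 24, 16], nclass=1000)
with open('densenet_train.prototxt', 'w') as f:
    f.write(proto_str)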
Code example #25
def make_resnet(training_data='train_data_path',
                test_data='test_data_path',
                mean_file='mean.binaryproto',
                depth=50):

    # num_feature_maps = np.array([16, 32, 64]) # feature map size: [32, 16, 8]
    configs = {
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
        200: [3, 24, 36, 3],
    }
    block_config = configs[depth]
    num_feature_maps = [64, 128, 256, 512]
    n_stage = len(num_feature_maps)

    n = caffe.NetSpec()
    # make training data layer
    n.data, n.label = L.Data(source=training_data,
                             backend=P.Data.LMDB,
                             batch_size=256,
                             ntop=2,
                             transform_param=dict(crop_size=224,
                                                  mean_file=mean_file,
                                                  mirror=True),
                             image_data_param=dict(shuffle=True),
                             include=dict(phase=0))
    # make test data layer
    n.test_data, n.test_label = L.Data(source=test_data,
                                       backend=P.Data.LMDB,
                                       batch_size=100,
                                       ntop=2,
                                       transform_param=dict(
                                           crop_size=224,
                                           mean_file=mean_file,
                                           mirror=False),
                                       include=dict(phase=1))
    # conv1 should accept both training and test data layers. But this is inconvenient to code in pycaffe.
    # You have to write two conv layers for them. To deal with this, I temporarily ignore the test data layer
    # and let conv1 accept the output of training data layer. Then, after making the whole prototxt, I postprocess
    # the top name of the two data layers, renaming their names to the same.
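    # A sketch of that postprocessing (an assumption about how it is done; it is
    # not shown in this file): generate the prototxt, then rename the test tops
    # so both data layers expose the same blob names, e.g.
    #   proto_str = str(n.to_proto())
    #   proto_str = proto_str.replace('"test_data"', '"data"')
    #   proto_str = proto_str.replace('"test_label"', '"label"')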

    n.conv = L.Convolution(
        n.data,
        kernel_size=7,
        stride=2,
        num_output=64,
        pad=3,
        param=[dict(lr_mult=1, decay_mult=1),
               dict(lr_mult=2, decay_mult=0)],
        weight_filler=weight_filler,
        bias_filler=bias_filler)
    n.bn = L.BatchNorm(n.conv, in_place=True)
    n.scale = L.Scale(n.bn, scale_param=dict(bias_term=True), in_place=True)
    n.relu = L.ReLU(n.scale, in_place=True)

    n.max_pooling = L.Pooling(n.relu,
                              pool=P.Pooling.MAX,
                              kernel_size=3,
                              stride=2,
                              pad=0)
    # set up a checkpoint so as to know where we get.
    checkpoint = 'n.max_pooling'

    # start making blocks.
    # num_feature_maps: the number of feature maps for each stage; here it is
    #                   [64, 128, 256, 512], so the network has four stages.
    # block_config:     how many residual blocks each stage contains (a parameter
    #                   from the original paper, selected via the `depth` argument).
    for i in range(n_stage):
        num_map = num_feature_maps[i]
        nblocks = block_config[i]
        if (i == 0):
            stride = 1
        else:
            stride = 2
        for res in range(nblocks):
            # stage name
            stage = 'blk' + str(res + 1) + '_stg' + str(i + 1)
            # use the projecting block when downsample the feature map
            if res == 0:
                make_res = 'n.' + 'conv_' + stage + '_proj,' + \
                           'n.' + 'bn_' + stage + '_proj,' + \
                           'n.' + 'scale_' + stage + '_proj,' + \
                           'n.' + 'conv_' + stage + '_a,' + \
                           'n.' + 'bn_' + stage + '_a, ' + \
                           'n.' + 'scale_' + stage + '_a, ' + \
                           'n.' + 'relu_' + stage + '_a, ' + \
                           'n.' + 'conv_' + stage + '_b, ' + \
                           'n.' + 'bn_' + stage + '_b, ' + \
                           'n.' + 'scale_' + stage + '_b, ' + \
                           'n.' + 'relu_' + stage + '_b, ' + \
                           'n.' + 'conv_' + stage + '_c, ' + \
                           'n.' + 'bn_' + stage + '_c, ' + \
                           'n.' + 'scale_' + stage + '_c, ' + \
                           'n.' + 'se_pool_' + stage + '_c, ' + \
                           'n.' + 'se_reduce_' + stage + ', ' + \
                           'n.' + 'se_relu_' + stage + ', ' + \
                           'n.' + 'se_recover_' + stage + ', ' + \
                           'n.' + 'se_sigmoid_' + stage + ', ' + \
                           'n.' + 'se_scale_' + stage + ', ' + \
                           'n.' + 'eltsum_' + stage + ', ' + \
                           'n.' + 'relu_after_sum_' + stage + \
                           ' = project_residual(' + checkpoint + ', num_out=num_map, stride=' + str(stride) + ')'
                exec(make_res)
                checkpoint = 'n.' + 'relu_after_sum_' + stage  # where we get
                continue

            # most blocks have this shape
            make_res = 'n.' + 'conv_' + stage + '_a, ' + \
                       'n.' + 'bn_' + stage + '_a, ' + \
                       'n.' + 'scale_' + stage + '_a, ' + \
                       'n.' + 'relu_' + stage + '_a, ' + \
                       'n.' + 'conv_' + stage + '_b, ' + \
                       'n.' + 'bn_' + stage + '_b, ' + \
                       'n.' + 'scale_' + stage + '_b, ' + \
                       'n.' + 'relu_' + stage + '_b, ' + \
                       'n.' + 'conv_' + stage + '_c, ' + \
                       'n.' + 'bn_' + stage + '_c, ' + \
                       'n.' + 'scale_' + stage + '_c, ' + \
                       'n.' + 'se_pool_' + stage + '_d, ' + \
                       'n.' + 'se_reduce_' + stage + ', ' + \
                       'n.' + 'se_relu_' + stage + ', ' + \
                       'n.' + 'se_recover_' + stage + ', ' + \
                       'n.' + 'se_sigmoid_' + stage + ', ' + \
                       'n.' + 'se_scale_' + stage + ', ' + \
                       'n.' + 'eltsum_' + stage + ', ' + \
                       'n.' + 'relu_after_sum_' + stage + \
                       ' = identity_residual(' + checkpoint + ', num_out=num_map, stride=1)'
            exec(make_res)
            checkpoint = 'n.' + 'relu_after_sum_' + stage  # where we get

    # add the pooling layer
    exec('n.pool_global = L.Pooling(' + checkpoint +
         ', pool=P.Pooling.AVE, global_pooling=True)')
    n.score = L.InnerProduct(
        n.pool_global,
        num_output=1000,
        param=[dict(lr_mult=1, decay_mult=1),
               dict(lr_mult=2, decay_mult=0)],
        weight_filler=dict(type='gaussian', std=0.01),
        bias_filler=dict(type='constant', value=0))
    n.loss = L.SoftmaxWithLoss(n.score, n.label)
    n.acc = L.Accuracy(n.score, n.label)

    return n.to_proto()
Code example #26
    def inception_v3_proto(self, batch_size, phase='TRAIN'):
        n = caffe.NetSpec()
        if phase == 'TRAIN':
            source_data = self.train_data
            mirror = True
        else:
            source_data = self.test_data
            mirror = False
        n.data, n.label = L.Data(source=source_data,
                                 backend=P.Data.LMDB,
                                 batch_size=batch_size,
                                 ntop=2,
                                 transform_param=dict(
                                     crop_size=299,
                                     mean_value=[104, 117, 123],
                                     mirror=mirror))
        # stage 1
        n.conv1_3x3_s2, n.conv1_3x3_s2_bn, n.conv1_3x3_relu, n.conv2_3x3_s1, n.conv2_3x3_s1_bn, n.conv2_3x3_relu, \
        n.conv3_3x3_s1, n.conv3_3x3_s1_bn, n.conv3_3x3_relu = \
            conv_bn_stack_3(n.data, dict(num_output=[32, 32, 64], kernel_size=[3, 3, 3], stride=[2, 1, 1],
                                         pad=[0, 0, 1], group=[1, 1, 1], weight_type=['xavier', 'xavier', 'xavier'],
                                         weight_std=[0.01, 0.01, 0.01],
                                         bias_type=['constant', 'constant', 'constant'], bias_value=[0.2, 0.2, 0.2]))
        n.pool1_3x3_s2 = L.Pooling(n.conv3_3x3_s1_bn,
                                   kernel_size=3,
                                   stride=2,
                                   pool=P.Pooling.MAX)
        # stage 2
        n.conv4_3x3_reduce, n.conv4_3x3_reduce_bn, n.conv4_relu_3x3_reduce, n.conv4_3x3, n.conv4_3x3_bn, n.conv4_relu_3x3 = \
            conv_bn_stack_2(n.pool1_3x3_s2,
                            dict(num_output=[80, 192], kernel_size=[1, 3], stride=[1, 1], pad=[0, 0], group=[1, 1],
                                 weight_type=['xavier', 'xavier'], weight_std=[0.01, 0.01],
                                 bias_type=['constant', 'constant'], bias_value=[0.2, 0.2]))
        n.pool2_3x3_s2 = L.Pooling(n.conv4_3x3_bn,
                                   kernel_size=3,
                                   stride=2,
                                   pool=P.Pooling.MAX)

        # stage 3
        n.inception_3a_1x1, n.inception_3a_1x1_bn, n.inception_3a_relu_1x1, n.inception_3a_5x5_reduce, \
        n.inception_3a_5x5_reduce_bn, n.inception_3a_relu_5x5_reduce, n.inception_3a_5x5, n.inception_3a_5x5_bn, \
        n.inception_3a_relu_5x5, n.inception_3a_3x3_reduce, n.inception_3a_3x3_reduce_bn, n.inception_3a_relu_3x3_reduce, \
        n.inception_3a_3x3_1, n.inception_3a_3x3_1_bn, n.inception_3a_relu_3x3_1, n.inception_3a_3x3_2, \
        n.inception_3a_3x3_2_bn, n.inception_3a_relu_3x3_2, n.inception_3a_pool, n.inception_3a_pool_proj, \
        n.inception_3a_pool_proj_bn, n.inception_3a_relu_pool_proj, n.inception_3a_output = \
            inception_v3_7a(n.pool2_3x3_s2,
                            dict(conv_1x1=64, conv_5x5_reduce=48, conv_5x5=64, conv_3x3_reduce=64, conv_3x3_1=96,
                                 conv_3x3_2=96, pool_proj=32))
        n.inception_3b_1x1, n.inception_3b_1x1_bn, n.inception_3b_relu_1x1, n.inception_3b_5x5_reduce, \
        n.inception_3b_5x5_reduce_bn, n.inception_3b_relu_5x5_reduce, n.inception_3b_5x5, n.inception_3b_5x5_bn, \
        n.inception_3b_relu_5x5, n.inception_3b_3x3_reduce, n.inception_3b_3x3_reduce_bn, n.inception_3b_relu_3x3_reduce, \
        n.inception_3b_3x3_1, n.inception_3b_3x3_1_bn, n.inception_3b_relu_3x3_1, n.inception_3b_3x3_2, \
        n.inception_3b_3x3_2_bn, n.inception_3b_relu_3x3_2, n.inception_3b_pool, n.inception_3b_pool_proj, \
        n.inception_3b_pool_proj_bn, n.inception_3b_relu_pool_proj, n.inception_3b_output = \
            inception_v3_7a(n.inception_3a_output,
                            dict(conv_1x1=64, conv_5x5_reduce=48, conv_5x5=64, conv_3x3_reduce=64, conv_3x3_1=96,
                                 conv_3x3_2=96, pool_proj=64))
        n.inception_3c_1x1, n.inception_3c_1x1_bn, n.inception_3c_relu_1x1, n.inception_3c_5x5_reduce, \
        n.inception_3c_5x5_reduce_bn, n.inception_3c_relu_5x5_reduce, n.inception_3c_5x5, n.inception_3c_5x5_bn, \
        n.inception_3c_relu_5x5, n.inception_3c_3x3_reduce, n.inception_3c_3x3_reduce_bn, n.inception_3c_relu_3x3_reduce, \
        n.inception_3c_3x3_1, n.inception_3c_3x3_1_bn, n.inception_3c_relu_3x3_1, n.inception_3c_3x3_2, \
        n.inception_3c_3x3_2_bn, n.inception_3c_relu_3x3_2, n.inception_3c_pool, n.inception_3c_pool_proj, \
        n.inception_3c_pool_proj_bn, n.inception_3c_relu_pool_proj, n.inception_3c_output = \
            inception_v3_7a(n.inception_3b_output,
                            dict(conv_1x1=64, conv_5x5_reduce=48, conv_5x5=64, conv_3x3_reduce=64, conv_3x3_1=96,
                                 conv_3x3_2=96, pool_proj=64))
        n.inception_3d_3x3_0, n.inception_3d_3x3_0_bn, n.inception_3d_relu_3x3_0, n.inception_3d_3x3_reduce, \
        n.inception_3d_3x3_reduce_bn, n.inception_3d_relu_3x3_reduce, n.inception_3d_3x3_1, n.inception_3d_3x3_1_bn, \
        n.inception_3d_relu_3x3_1, n.inception_3d_3x3_2, n.inception_3d_3x3_2_bn, n.inception_3d_relu_3x3_2, \
        n.inception_3d_pool, n.inception_3d_output = \
            inception_v3_7b(n.inception_3c_output,
                            dict(conv_3x3_0=384, conv_3x3_reduce=64, conv_3x3_1=96, conv_3x3_2=96))

        # stage 4
        n.inception_4a_1x1, n.inception_4a_1x1_bn, n.inception_4a_relu_1x1, n.inception_4a_1x7_reduce, \
        n.inception_4a_1x7_reduce_bn, n.inception_4a_relu_1x7_reduce, n.inception_4a_1x7_0, n.inception_4a_1x7_0_bn, \
        n.inception_4a_relu_1x7_0, n.inception_4a_7x1_0, n.inception_4a_7x1_0_bn, n.inception_4a_relu_7x1_0, \
        n.inception_4a_7x1_reduce, n.inception_4a_7x1_reduce_bn, n.inception_4a_relu_7x1_reduce, \
        n.inception_4a_7x1_1, n.inception_4a_7x1_1_bn, n.inception_4a_relu_7x1_1, n.inception_4a_1x7_1, \
        n.inception_4a_1x7_1_bn, n.inception_4a_relu_1x7_1, n.inception_4a_7x1_2, n.inception_4a_7x1_2_bn, \
        n.inception_4a_relu_7x1_2, n.inception_4a_1x7_2, n.inception_4a_1x7_2_bn, n.inception_4a_relu_1x7_2, \
        n.inception_4a_pool, n.inception_4a_pool_proj, n.inception_4a_pool_proj_bn, n.inception_4a_relu_pool_proj, \
        n.inception_4a_output = \
            inception_v3_7c(n.inception_3d_output,
                            dict(conv_1x1=192, conv_1x7_reduce=128, conv_1x7_0=128, conv_7x1_0=192, conv_7x1_reduce=128,
                                 conv_1x7_1=128, conv_7x1_1=128, conv_1x7_2=128, conv_7x1_2=192, pool_proj=192))
        n.inception_4b_1x1, n.inception_4b_1x1_bn, n.inception_4b_relu_1x1, n.inception_4b_1x7_reduce, \
        n.inception_4b_1x7_reduce_bn, n.inception_4b_relu_1x7_reduce, n.inception_4b_1x7_0, n.inception_4b_1x7_0_bn, \
        n.inception_4b_relu_1x7_0, n.inception_4b_7x1_0, n.inception_4b_7x1_0_bn, n.inception_4b_relu_7x1_0, \
        n.inception_4b_7x1_reduce, n.inception_4b_7x1_reduce_bn, n.inception_4b_relu_7x1_reduce, \
        n.inception_4b_7x1_1, n.inception_4b_7x1_1_bn, n.inception_4b_relu_7x1_1, n.inception_4b_1x7_1, \
        n.inception_4b_1x7_1_bn, n.inception_4b_relu_1x7_1, n.inception_4b_7x1_2, n.inception_4b_7x1_2_bn, \
        n.inception_4b_relu_7x1_2, n.inception_4b_1x7_2, n.inception_4b_1x7_2_bn, n.inception_4b_relu_1x7_2, \
        n.inception_4b_pool, n.inception_4b_pool_proj, n.inception_4b_pool_proj_bn, n.inception_4b_relu_pool_proj, \
        n.inception_4b_output = \
            inception_v3_7c(n.inception_4a_output,
                            dict(conv_1x1=192, conv_1x7_reduce=160, conv_1x7_0=160, conv_7x1_0=192, conv_7x1_reduce=160,
                                 conv_1x7_1=160, conv_7x1_1=160, conv_1x7_2=160, conv_7x1_2=160, pool_proj=192))
        n.inception_4c_1x1, n.inception_4c_1x1_bn, n.inception_4c_relu_1x1, n.inception_4c_1x7_reduce, \
        n.inception_4c_1x7_reduce_bn, n.inception_4c_relu_1x7_reduce, n.inception_4c_1x7_0, n.inception_4c_1x7_0_bn, \
        n.inception_4c_relu_1x7_0, n.inception_4c_7x1_0, n.inception_4c_7x1_0_bn, n.inception_4c_relu_7x1_0, \
        n.inception_4c_7x1_reduce, n.inception_4c_7x1_reduce_bn, n.inception_4c_relu_7x1_reduce, \
        n.inception_4c_7x1_1, n.inception_4c_7x1_1_bn, n.inception_4c_relu_7x1_1, n.inception_4c_1x7_1, \
        n.inception_4c_1x7_1_bn, n.inception_4c_relu_1x7_1, n.inception_4c_7x1_2, n.inception_4c_7x1_2_bn, \
        n.inception_4c_relu_7x1_2, n.inception_4c_1x7_2, n.inception_4c_1x7_2_bn, n.inception_4c_relu_1x7_2, \
        n.inception_4c_pool, n.inception_4c_pool_proj, n.inception_4c_pool_proj_bn, n.inception_4c_relu_pool_proj, \
        n.inception_4c_output = \
            inception_v3_7c(n.inception_4b_output,
                            dict(conv_1x1=192, conv_1x7_reduce=160, conv_1x7_0=160, conv_7x1_0=192, conv_7x1_reduce=160,
                                 conv_1x7_1=160, conv_7x1_1=160, conv_1x7_2=160, conv_7x1_2=160, pool_proj=192))
        n.inception_4d_1x1, n.inception_4d_1x1_bn, n.inception_4d_relu_1x1, n.inception_4d_1x7_reduce, \
        n.inception_4d_1x7_reduce_bn, n.inception_4d_relu_1x7_reduce, n.inception_4d_1x7_0, n.inception_4d_1x7_0_bn, \
        n.inception_4d_relu_1x7_0, n.inception_4d_7x1_0, n.inception_4d_7x1_0_bn, n.inception_4d_relu_7x1_0, \
        n.inception_4d_7x1_reduce, n.inception_4d_7x1_reduce_bn, n.inception_4d_relu_7x1_reduce, \
        n.inception_4d_7x1_1, n.inception_4d_7x1_1_bn, n.inception_4d_relu_7x1_1, n.inception_4d_1x7_1, \
        n.inception_4d_1x7_1_bn, n.inception_4d_relu_1x7_1, n.inception_4d_7x1_2, n.inception_4d_7x1_2_bn, \
        n.inception_4d_relu_7x1_2, n.inception_4d_1x7_2, n.inception_4d_1x7_2_bn, n.inception_4d_relu_1x7_2, \
        n.inception_4d_pool, n.inception_4d_pool_proj, n.inception_4d_pool_proj_bn, n.inception_4d_relu_pool_proj, \
        n.inception_4d_output = \
            inception_v3_7c(n.inception_4c_output,
                            dict(conv_1x1=192, conv_1x7_reduce=192, conv_1x7_0=192, conv_7x1_0=192, conv_7x1_reduce=192,
                                 conv_1x7_1=192, conv_7x1_1=192, conv_1x7_2=192, conv_7x1_2=192, pool_proj=192))
        n.inception_4e_3x3_reduce, n.inception_4e_3x3_reduce_bn, n.inception_4e_relu_3x3_reduce, n.inception_4e_3x3_0, \
        n.inception_4e_3x3_0_bn, n.inception_4e_relu_3x3_0, n.inception_4e_1x7_reduce, n.inception_4e_1x7_reduce_bn, \
        n.inception_4e_relu_1x7_reduce, n.inception_4e_1x7, n.inception_4e_1x7_bn, n.inception_4e_relu_1x7, \
        n.inception_4e_7x1, n.inception_4e_7x1_bn, n.inception_4e_relu_7x1, n.inception_4e_3x3_1, \
        n.inception_4e_3x3_1_bn, n.inception_4e_relu_3x3_1, n.inception_4e_pool, n.inception_4e_output = \
            inception_v3_7d(n.inception_4d_output,
                            dict(conv_3x3_reduce=192, conv_3x3_0=320, conv_1x7_reduce=192, conv_1x7=192, conv_7x1=192,
                                 conv_3x3_1=192))

        # stage 5
        n.inception_5a_1x1, n.inception_5a_1x1_bn, n.inception_5a_relu_1x1, n.inception_5a_3x3_0_reduce, \
        n.inception_5a_3x3_0_reduce_bn, n.inception_5a_relu_3x3_0_reduce, n.inception_5a_1x3_0, n.inception_5a_1x3_0_bn, \
        n.inception_5a_relu_1x3_0, n.inception_5a_3x1_0, n.inception_5a_3x1_0_bn, n.inception_5a_relu_3x1_0, \
        n.inception_5a_3x3_1_reduce, n.inception_5a_3x3_1_reduce_bn, n.inception_5a_relu_3x3_1_reduce, n.inception_5a_3x3_1, \
        n.inception_5a_3x3_1_bn, n.inception_5a_relu_3x3_1, n.inception_5a_1x3_1, n.inception_5a_1x3_1_bn, \
        n.inception_5a_relu_1x3_1, n.inception_5a_3x1_1, n.inception_5a_3x1_1_bn, n.inception_5a_relu_3x1_1, \
        n.inception_5a_pool, n.inception_5a_pool_proj, n.inception_5a_pool_proj_bn, n.inception_5a_relu_pool_proj, \
        n.inception_5a_output = \
            inception_v3_7e(n.inception_4e_output,
                            dict(conv_1x1=320, conv_3x3_0_reduce=384, conv_1x3_0=384, conv_3x1_0=384,
                                 conv_3x3_1_reduce=448, conv_3x3_1=384, conv_1x3_1=384, conv_3x1_1=384,
                                 pooling=P.Pooling.AVE, pool_proj=192))
        n.inception_5b_1x1, n.inception_5b_1x1_bn, n.inception_5b_relu_1x1, n.inception_5b_3x3_0_reduce, \
        n.inception_5b_3x3_0_reduce_bn, n.inception_5b_relu_3x3_0_reduce, n.inception_5b_1x3_0, n.inception_5b_1x3_0_bn, \
        n.inception_5b_relu_1x3_0, n.inception_5b_3x1_0, n.inception_5b_3x1_0_bn, n.inception_5b_relu_3x1_0, \
        n.inception_5b_3x3_1_reduce, n.inception_5b_3x3_1_reduce_bn, n.inception_5b_relu_3x3_1_reduce, n.inception_5b_3x3_1, \
        n.inception_5b_3x3_1_bn, n.inception_5b_relu_3x3_1, n.inception_5b_1x3_1, n.inception_5b_1x3_1_bn, \
        n.inception_5b_relu_1x3_1, n.inception_5b_3x1_1, n.inception_5b_3x1_1_bn, n.inception_5b_relu_3x1_1, \
        n.inception_5b_pool, n.inception_5b_pool_proj, n.inception_5b_pool_proj_bn, n.inception_5b_relu_pool_proj, \
        n.inception_5b_output = \
            inception_v3_7e(n.inception_5a_output,
                            dict(conv_1x1=320, conv_3x3_0_reduce=384, conv_1x3_0=384, conv_3x1_0=384,
                                 conv_3x3_1_reduce=448, conv_3x3_1=384, conv_1x3_1=384, conv_3x1_1=384,
                                 pooling=P.Pooling.MAX, pool_proj=192))

        n.pool3_7x7_s1 = L.Pooling(n.inception_5b_output,
                                   kernel_size=7,
                                   stride=1,
                                   pool=P.Pooling.AVE)
        n.classifier = L.InnerProduct(n.pool3_7x7_s1,
                                      num_output=self.classifier_num,
                                      param=[
                                          dict(lr_mult=1, decay_mult=1),
                                          dict(lr_mult=2, decay_mult=0)
                                      ],
                                      weight_filler=dict(type='xavier'),
                                      bias_filler=dict(type='constant',
                                                       value=0))

        n.loss = L.SoftmaxWithLoss(n.classifier, n.label)
        if phase != 'TRAIN':
            n.loss_top1, n.loss_top5 = accuracy_top1_top5(n.classifier, n.label)
        return n.to_proto()
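
The accuracy_top1_top5 helper called above is not part of this excerpt. A minimal sketch consistent with its call site (and with the per-loss accuracy pattern in コード例 #29 below) might look like this; treat it as a reconstruction, not the original:

def accuracy_top1_top5(classifier, label):
    # Hypothetical helper: two TEST-phase Accuracy layers, top-1 and top-5.
    top1 = L.Accuracy(classifier, label, include=dict(phase=1))
    top5 = L.Accuracy(classifier, label, include=dict(phase=1),
                      accuracy_param=dict(top_k=5))
    return top1, top5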


def create_net(phase):
    global train_transform_param
    global test_transform_param
    train_transform_param = {'mirror': False, 'mean_file': Params['mean_file']}
    test_transform_param = {'mean_file': Params['mean_file']}
    if phase == 'train':
        lmdb_file = Params['train_lmdb']
        transform_param = train_transform_param
        batch_size = Params['batch_size_per_device']
    else:
        lmdb_file = Params['test_lmdb']
        transform_param = test_transform_param
        batch_size = Params['test_batch_size']

    net = caffe.NetSpec()
    net.data, net.label = L.Data(batch_size=batch_size,
                                 backend=P.Data.LMDB,
                                 source=lmdb_file,
                                 transform_param=transform_param,
                                 ntop=2)
    #include=dict(phase=caffe_pb2.Phase.Value('TRAIN')),

    kwargs = {
        'param': [dict(lr_mult=1), dict(lr_mult=2)],
        'weight_filler': dict(type='gaussian', std=0.0001),
        'bias_filler': dict(type='constant')
    }
    net.conv1 = L.Convolution(net.data, num_output=16, kernel_size=3, **kwargs)
    net.pool1 = L.Pooling(net.conv1,
                          pool=P.Pooling.MAX,
                          kernel_size=3,
                          stride=2)
    net.relu1 = L.ReLU(net.pool1, in_place=True)
    kwargs = {
        'param':
        [dict(lr_mult=1, decay_mult=1),
         dict(lr_mult=2, decay_mult=0)],
        'weight_filler': dict(type='gaussian', std=0.005),
        'bias_filler': dict(type='constant')
    }
    net.fc2 = L.InnerProduct(net.pool1, num_output=128, **kwargs)
    net.relu2 = L.ReLU(net.fc2, in_place=True)
    net.drop2 = L.Dropout(net.fc2,
                          in_place=True,
                          dropout_param=dict(dropout_ratio=0.5))
    kwargs = {
        'param':
        [dict(lr_mult=1, decay_mult=100),
         dict(lr_mult=2, decay_mult=0)],
        'weight_filler': dict(type='gaussian', std=0.01),
        'bias_filler': dict(type='constant', value=0)
    }
    net.fc3 = L.InnerProduct(net.fc2, num_output=45, **kwargs)
    if phase == 'train':
        net.loss = L.SoftmaxWithLoss(net.fc3, net.label)
    elif phase == 'test':
        net.accuracy = L.Accuracy(net.fc3, net.label)
    else:
        net.prob = L.Softmax(net.fc3)

    net_proto = net.to_proto()
    if phase == 'deploy':
        del net_proto.layer[0]
        #del net_proto.layer[-1]
        net_proto.input.extend(['data'])
        net_proto.input_dim.extend([1, 3, 12, 36])
    net_proto.name = '{}_{}'.format(Params['model_name'], phase)
    return net_proto
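
A minimal driver sketch for create_net (hypothetical: the Params dict is assumed to carry exactly the keys the function reads):

Params = {
    'model_name': 'smallnet',
    'mean_file': 'mean.binaryproto',
    'train_lmdb': 'train_lmdb',
    'test_lmdb': 'test_lmdb',
    'batch_size_per_device': 64,
    'test_batch_size': 100,
}

# NetParameter messages stringify to prototxt text format.
for phase in ('train', 'test', 'deploy'):
    with open('{}_{}.prototxt'.format(Params['model_name'], phase), 'w') as f:
        f.write(str(create_net(phase)))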
コード例 #28
0
import caffe
from caffe import layers as L

temp_image, temp_label = L.Data(
    name='tw',
    source='/home/ncl/caffe/examples/mnist/mnist_leveldb',
    batch_size=128,
    include=dict(phase=caffe.TRAIN),
    ntop=2,
    backend=0)
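
backend=0 above selects LEVELDB by its raw enum value. An equivalent, more readable spelling (a sketch, assuming the standard pycaffe params helper) uses the DataParameter.DB enum, in which LEVELDB is 0 and LMDB is 1:

from caffe import params as P

# Same layer as above, with the backend spelled as an enum instead of 0.
temp_image, temp_label = L.Data(
    name='tw',
    source='/home/ncl/caffe/examples/mnist/mnist_leveldb',
    batch_size=128,
    include=dict(phase=caffe.TRAIN),
    ntop=2,
    backend=P.Data.LEVELDB)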
コード例 #29
0
    def inception_v1_proto(self, batch_size, phase='TRAIN'):
        n = caffe.NetSpec()
        if phase == 'TRAIN':
            source_data = self.train_data
            mirror = True
        else:
            source_data = self.test_data
            mirror = False
        n.data, n.label = L.Data(source=source_data, backend=P.Data.LMDB, batch_size=batch_size, ntop=2,
                                 transform_param=dict(crop_size=227, mean_value=[104, 117, 123], mirror=mirror))

        n.conv1_7x7_s2 = L.Convolution(n.data, num_output=64, kernel_size=7, stride=2, pad=3,
                                       param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                                       weight_filler=dict(type='xavier'),
                                       bias_filler=dict(type='constant', value=0.2))
        n.conv1_relu_7x7 = L.ReLU(n.conv1_7x7_s2, in_place=True)
        n.pool1_3x3_s2 = L.Pooling(n.conv1_7x7_s2, kernel_size=3, stride=2, pool=P.Pooling.MAX)
        n.pool1_norm1 = L.LRN(n.pool1_3x3_s2, local_size=5, alpha=1e-4, beta=0.75)

        n.conv2_3x3_reduce = L.Convolution(n.pool1_norm1, kernel_size=1, num_output=64, stride=1,
                                           param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                                           weight_filler=dict(type='xavier'),
                                           bias_filler=dict(type='constant', value=0.2))
        n.conv2_relu_3x3_reduce = L.ReLU(n.conv2_3x3_reduce, in_place=True)

        n.conv2_3x3 = L.Convolution(n.conv2_3x3_reduce, num_output=192, kernel_size=3, stride=1, pad=1,
                                    param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                                    weight_filler=dict(type='xavier'),
                                    bias_filler=dict(type='constant', value=0.2))
        n.conv2_relu_3x3 = L.ReLU(n.conv2_3x3, in_place=True)
        n.conv2_norm2 = L.LRN(n.conv2_3x3, local_size=5, alpha=1e-4, beta=0.75)
        n.pool2_3x3_s2 = L.Pooling(n.conv2_norm2, kernel_size=3, stride=2, pool=P.Pooling.MAX)

        n.inception_3a_1x1, n.inception_3a_relu_1x1, n.inception_3a_3x3_reduce, n.inception_3a_relu_3x3_reduce, \
        n.inception_3a_3x3, n.inception_3a_relu_3x3, n.inception_3a_5x5_reduce, n.inception_3a_relu_5x5_reduce, \
        n.inception_3a_5x5, n.inception_3a_relu_5x5, n.inception_3a_pool, n.inception_3a_pool_proj, \
        n.inception_3a_relu_pool_proj, n.inception_3a_output = \
            inception(n.pool2_3x3_s2, dict(conv_1x1=64, conv_3x3_reduce=96, conv_3x3=128, conv_5x5_reduce=16,
                                           conv_5x5=32, pool_proj=32))
        n.inception_3b_1x1, n.inception_3b_relu_1x1, n.inception_3b_3x3_reduce, n.inception_3b_relu_3x3_reduce, \
        n.inception_3b_3x3, n.inception_3b_relu_3x3, n.inception_3b_5x5_reduce, n.inception_3b_relu_5x5_reduce, \
        n.inception_3b_5x5, n.inception_3b_relu_5x5, n.inception_3b_pool, n.inception_3b_pool_proj, \
        n.inception_3b_relu_pool_proj, n.inception_3b_output = \
            inception(n.inception_3a_output, dict(conv_1x1=128, conv_3x3_reduce=128, conv_3x3=192, conv_5x5_reduce=32,
                                                  conv_5x5=96, pool_proj=64))
        n.pool3_3x3_s2 = L.Pooling(n.inception_3b_output, kernel_size=3, stride=2, pool=P.Pooling.MAX)
        n.inception_4a_1x1, n.inception_4a_relu_1x1, n.inception_4a_3x3_reduce, n.inception_4a_relu_3x3_reduce, \
        n.inception_4a_3x3, n.inception_4a_relu_3x3, n.inception_4a_5x5_reduce, n.inception_4a_relu_5x5_reduce, \
        n.inception_4a_5x5, n.inception_4a_relu_5x5, n.inception_4a_pool, n.inception_4a_pool_proj, \
        n.inception_4a_relu_pool_proj, n.inception_4a_output = \
            inception(n.pool3_3x3_s2, dict(conv_1x1=192, conv_3x3_reduce=96, conv_3x3=208, conv_5x5_reduce=16,
                                           conv_5x5=48, pool_proj=64))
        # loss 1
        n.loss1_ave_pool = L.Pooling(n.inception_4a_output, kernel_size=5, stride=3, pool=P.Pooling.AVE)
        n.loss1_conv = L.Convolution(n.loss1_ave_pool, num_output=128, kernel_size=1, stride=1,
                                     param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                                     weight_filler=dict(type='xavier'),
                                     bias_filler=dict(type='constant', value=0.2))
        n.loss1_relu_conv = L.ReLU(n.loss1_conv, in_place=True)
        n.loss1_fc, n.loss1_relu_fc, n.loss1_drop_fc = \
            fc_relu_drop(n.loss1_conv, dict(num_output=1024, weight_type='xavier', weight_std=1, bias_type='constant',
                                            bias_value=0.2), dropout_ratio=0.7)
        n.loss1_classifier = L.InnerProduct(n.loss1_fc, num_output=self.classifier_num,
                                            param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                                            weight_filler=dict(type='xavier'),
                                            bias_filler=dict(type='constant', value=0))
        n.loss1_loss = L.SoftmaxWithLoss(n.loss1_classifier, n.label, loss_weight=0.3)
        if phase != 'TRAIN':
            n.loss1_accuracy_top1 = L.Accuracy(n.loss1_classifier, n.label, include=dict(phase=1))
            n.loss1_accuracy_top5 = L.Accuracy(n.loss1_classifier, n.label, include=dict(phase=1),
                                               accuracy_param=dict(top_k=5))

        n.inception_4b_1x1, n.inception_4b_relu_1x1, n.inception_4b_3x3_reduce, n.inception_4b_relu_3x3_reduce, \
        n.inception_4b_3x3, n.inception_4b_relu_3x3, n.inception_4b_5x5_reduce, n.inception_4b_relu_5x5_reduce, \
        n.inception_4b_5x5, n.inception_4b_relu_5x5, n.inception_4b_pool, n.inception_4b_pool_proj, \
        n.inception_4b_relu_pool_proj, n.inception_4b_output = \
            inception(n.inception_4a_output, dict(conv_1x1=160, conv_3x3_reduce=112, conv_3x3=224, conv_5x5_reduce=24,
                                                  conv_5x5=64, pool_proj=64))
        n.inception_4c_1x1, n.inception_4c_relu_1x1, n.inception_4c_3x3_reduce, n.inception_4c_relu_3x3_reduce, \
        n.inception_4c_3x3, n.inception_4c_relu_3x3, n.inception_4c_5x5_reduce, n.inception_4c_relu_5x5_reduce, \
        n.inception_4c_5x5, n.inception_4c_relu_5x5, n.inception_4c_pool, n.inception_4c_pool_proj, \
        n.inception_4c_relu_pool_proj, n.inception_4c_output = \
            inception(n.inception_4b_output, dict(conv_1x1=128, conv_3x3_reduce=128, conv_3x3=256, conv_5x5_reduce=24,
                                                  conv_5x5=64, pool_proj=64))
        n.inception_4d_1x1, n.inception_4d_relu_1x1, n.inception_4d_3x3_reduce, n.inception_4d_relu_3x3_reduce, \
        n.inception_4d_3x3, n.inception_4d_relu_3x3, n.inception_4d_5x5_reduce, n.inception_4d_relu_5x5_reduce, \
        n.inception_4d_5x5, n.inception_4d_relu_5x5, n.inception_4d_pool, n.inception_4d_pool_proj, \
        n.inception_4d_relu_pool_proj, n.inception_4d_output = \
            inception(n.inception_4c_output, dict(conv_1x1=112, conv_3x3_reduce=144, conv_3x3=288, conv_5x5_reduce=32,
                                                  conv_5x5=64, pool_proj=64))
        # loss 2
        n.loss2_ave_pool = L.Pooling(n.inception_4d_output, kernel_size=5, stride=3, pool=P.Pooling.AVE)
        n.loss2_conv = L.Convolution(n.loss2_ave_pool, num_output=128, kernel_size=1, stride=1,
                                     param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                                     weight_filler=dict(type='xavier'),
                                     bias_filler=dict(type='constant', value=0.2))
        n.loss2_relu_conv = L.ReLU(n.loss2_conv, in_place=True)
        n.loss2_fc, n.loss2_relu_fc, n.loss2_drop_fc = \
            fc_relu_drop(n.loss2_conv, dict(num_output=1024, weight_type='xavier', weight_std=1, bias_type='constant',
                                            bias_value=0.2), dropout_ratio=0.7)
        n.loss2_classifier = L.InnerProduct(n.loss2_fc, num_output=self.classifier_num,
                                            param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                                            weight_filler=dict(type='xavier'),
                                            bias_filler=dict(type='constant', value=0))
        n.loss2_loss = L.SoftmaxWithLoss(n.loss2_classifier, n.label, loss_weight=0.3)
        if phase != 'TRAIN':
            n.loss2_accuracy_top1 = L.Accuracy(n.loss2_classifier, n.label, include=dict(phase=1))
            n.loss2_accuracy_top5 = L.Accuracy(n.loss2_classifier, n.label, include=dict(phase=1),
                                               accuracy_param=dict(top_k=5))

        n.inception_4e_1x1, n.inception_4e_relu_1x1, n.inception_4e_3x3_reduce, n.inception_4e_relu_3x3_reduce, \
        n.inception_4e_3x3, n.inception_4e_relu_3x3, n.inception_4e_5x5_reduce, n.inception_4e_relu_5x5_reduce, \
        n.inception_4e_5x5, n.inception_4e_relu_5x5, n.inception_4e_pool, n.inception_4e_pool_proj, \
        n.inception_4e_relu_pool_proj, n.inception_4e_output = \
            inception(n.inception_4d_output, dict(conv_1x1=256, conv_3x3_reduce=160, conv_3x3=320, conv_5x5_reduce=32,
                                                  conv_5x5=128, pool_proj=128))
        n.pool4_3x3_s2 = L.Pooling(n.inception_4e_output, kernel_size=3, stride=2, pool=P.Pooling.MAX)
        n.inception_5a_1x1, n.inception_5a_relu_1x1, n.inception_5a_3x3_reduce, n.inception_5a_relu_3x3_reduce, \
        n.inception_5a_3x3, n.inception_5a_relu_3x3, n.inception_5a_5x5_reduce, n.inception_5a_relu_5x5_reduce, \
        n.inception_5a_5x5, n.inception_5a_relu_5x5, n.inception_5a_pool, n.inception_5a_pool_proj, \
        n.inception_5a_relu_pool_proj, n.inception_5a_output = \
            inception(n.pool4_3x3_s2, dict(conv_1x1=256, conv_3x3_reduce=160, conv_3x3=320, conv_5x5_reduce=32,
                                           conv_5x5=128, pool_proj=128))
        n.inception_5b_1x1, n.inception_5b_relu_1x1, n.inception_5b_3x3_reduce, n.inception_5b_relu_3x3_reduce, \
        n.inception_5b_3x3, n.inception_5b_relu_3x3, n.inception_5b_5x5_reduce, n.inception_5b_relu_5x5_reduce, \
        n.inception_5b_5x5, n.inception_5b_relu_5x5, n.inception_5b_pool, n.inception_5b_pool_proj, \
        n.inception_5b_relu_pool_proj, n.inception_5b_output = \
            inception(n.inception_5a_output, dict(conv_1x1=384, conv_3x3_reduce=192, conv_3x3=384, conv_5x5_reduce=48,
                                                  conv_5x5=128, pool_proj=128))
        n.pool5_7x7_s1 = L.Pooling(n.inception_5b_output, kernel_size=7, stride=1, pool=P.Pooling.AVE)
        n.pool5_drop_7x7_s1 = L.Dropout(n.pool5_7x7_s1, in_place=True,
                                        dropout_param=dict(dropout_ratio=0.4))
        n.loss3_classifier = L.InnerProduct(n.pool5_7x7_s1, num_output=self.classifier_num,
                                            param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                                            weight_filler=dict(type='xavier'),
                                            bias_filler=dict(type='constant', value=0))
        n.loss3_loss = L.SoftmaxWithLoss(n.loss3_classifier, n.label, loss_weight=1)
        if phase != 'TRAIN':
            n.loss3_accuracy_top1 = L.Accuracy(n.loss3_classifier, n.label, include=dict(phase=1))
            n.loss3_accuracy_top5 = L.Accuracy(n.loss3_classifier, n.label, include=dict(phase=1),
                                               accuracy_param=dict(top_k=5))
        return n.to_proto()
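
A possible driver for this generator (a sketch: builder stands for a hypothetical instance of the surrounding class, with train_data, test_data and classifier_num assumed to be set elsewhere). Note that Caffe sums all weighted losses into one objective, so the two auxiliary classifiers contribute 0.3 each alongside the main loss's weight of 1:

with open('inception_v1_train.prototxt', 'w') as f:
    f.write(str(builder.inception_v1_proto(batch_size=32, phase='TRAIN')))
with open('inception_v1_test.prototxt', 'w') as f:
    f.write(str(builder.inception_v1_proto(batch_size=50, phase='TEST')))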
コード例 #30
0
import sys

import caffe
from caffe import layers as L
from caffe import params as P


def cnn_to_proto_loop(module_options, sample_ranges, dataset, prototxt_file):

    if dataset == 'mnist':
        prototxt_buffer = 'name: "LeNet"\n'
        prototxt_buffer += '"layer {"\n'
        prototxt_buffer += '  name: "data"\n'
        prototxt_buffer += '  type: "Input"\n'
        prototxt_buffer += '  top: "data"\n'
        prototxt_buffer += '  input_param { shape: { dim: ' + str(
            sample_ranges['train_batch_size_mnist']
        ) + ' dim: 1 dim: 28 dim: 28 } }\n}\n'
    elif dataset == 'cifar10':
        prototxt_buffer = 'name: "CIFAR10_full"\n'
        prototxt_buffer += 'input: "data"\n'
        prototxt_buffer += 'input_dim: ' + str(
            int(sample_ranges['train_batch_size_cifar10'])) + '\n'
        prototxt_buffer += 'input_dim: 3\n'
        prototxt_buffer += 'input_dim: 32\n'
        prototxt_buffer += 'input_dim: 32\n'
    else:
        print('Unknown dataset specified -- Exiting!!')
        sys.exit(1)

    # '#pragma-off' / '#pragma-on' bracket the train-phase data layer that
    # NetSpec emits; the writer loop below skips that region, and separately
    # skips the first generated layer after '#pragma-on' (the test-phase data
    # layer), so only the hand-written input stanza defines 'data'.
    prototxt_buffer += '#pragma-off\n'

    n = caffe.NetSpec()
    if dataset == 'mnist':

        n.data, n.label = L.Data(
            name="mnist",
            batch_size=sample_ranges['train_batch_size_mnist'],
            backend=1,
            source=sample_ranges['train_source_mnist'],
            ntop=2,
            transform_param=dict(scale=0.00390625),
            include=dict(phase=0))

        prototxt_buffer += str(n.to_proto())
        n = caffe.NetSpec()
        n.data, n.label = L.Data(
            name="mnist",
            batch_size=sample_ranges['test_batch_size_mnist'],
            backend=1,
            source=sample_ranges['test_source_mnist'],
            ntop=2,
            transform_param=dict(scale=0.00390625),
            include=dict(phase=1))
        previous_layer = n.data

    elif dataset == 'cifar10':

        n.data, n.label = L.Data(
            name="cifar",
            batch_size=sample_ranges['train_batch_size_cifar10'],
            backend=1,
            source=sample_ranges['train_source_cifar10'],
            ntop=2,
            transform_param=dict(mean_file="./mean.binaryproto"),
            include=dict(phase=0))

        prototxt_buffer += str(n.to_proto())
        n = caffe.NetSpec()
        n.data, n.label = L.Data(
            name="cifar",
            batch_size=sample_ranges['test_batch_size_cifar10'],
            backend=1,
            source=sample_ranges['test_source_cifar10'],
            ntop=2,
            transform_param=dict(mean_file="./mean.binaryproto"),
            include=dict(phase=1))
        previous_layer = n.data

    prototxt_buffer += '\n#pragma-on\n'

    # n.conv_ and n.pool_ are reassigned every iteration; earlier layers stay
    # reachable through their bottoms and are autonamed 'Convolution1',
    # 'Pooling1', ... by pycaffe. The rename pass at the bottom maps those
    # autonames back to conv<k>/pool<k>.
    for num_layer in range(module_options['num_conv_layers']):

        layer_name_conv = 'conv' + str(num_layer + 1)
        n.conv_ = L.Convolution(
            previous_layer,
            name=layer_name_conv,
            kernel_size=module_options[layer_name_conv]['kernel_size'],
            stride=module_options[layer_name_conv]['stride'],
            num_output=module_options[layer_name_conv]['num_output'],
            weight_filler=dict(type='xavier'),
            bias_filler=dict(type='constant'),
            param=dict(lr_mult=2))

        layer_name_pool = 'pool' + str(num_layer + 1)
        n.pool_ = L.Pooling(
            n.conv_,
            name=layer_name_pool,
            kernel_size=module_options[layer_name_pool]['kernel_size'],
            stride=module_options[layer_name_pool]['stride'],
            pool=P.Pooling.MAX)

        previous_layer = n.pool_

    for num_layer in range(module_options['num_fc_layers']):

        layer_name_fc = 'ip' + str(num_layer + 1)
        n.ip_ = L.InnerProduct(
            previous_layer,
            name=layer_name_fc,
            num_output=module_options[layer_name_fc]['num_output'],
            weight_filler=dict(type='xavier'),
            bias_filler=dict(type='constant'),
            param=dict(lr_mult=2))
        layer_name_relu = 'relu' + str(num_layer + 1)
        n.relu_ = L.ReLU(n.ip_, name=layer_name_relu, in_place=True)

        previous_layer = n.relu_

    n.ip_last = L.InnerProduct(previous_layer,
                               num_output=10,
                               weight_filler=dict(type='xavier'),
                               bias_filler=dict(type='constant'),
                               param=dict(lr_mult=2))

    n.accuracy = L.Accuracy(n.ip_last, n.label, include=dict(phase=1))
    n.loss = L.SoftmaxWithLoss(n.ip_last, n.label)

    prototxt_buffer += str(n.to_proto())

    # write_on_1 gates the '#pragma' regions; write_on_2 additionally drops
    # the first generated layer after '#pragma-on' (the duplicate data layer).
    write_on_1 = True
    past_first_layer = False
    write_on_2 = True
    lines = prototxt_buffer.split('\n')

    # generate model prototxt file
    with open(prototxt_file, 'w') as f:
        for line in lines:

            if line.strip() == '#pragma-off':
                write_on_1 = False
                continue
            if line.strip() == '#pragma-on':
                write_on_1 = True
                continue

            if line.strip() == '"layer {"':
                f.write("layer {\n")
                continue

            if line.strip() == 'layer {' and write_on_1 and not past_first_layer:
                if dataset == 'cifar10':
                    f.write("\n")
                past_first_layer = True
                write_on_2 = False
                continue
            if line.strip() == 'layer {' and past_first_layer:
                write_on_2 = True

            if write_on_1 and write_on_2:
                #print line

                # dstam -- find and replace pycaffe default naming to match paleo syntax
                line_ = line
                for next_conv in range(module_options['num_conv_layers'] - 1):

                    if line == '  top: "Convolution' + str(next_conv +
                                                           1) + '"':
                        line_ = '  top: "conv' + str(next_conv + 1) + '"'
                    if line == '  top: "Pooling' + str(next_conv + 1) + '"':
                        line_ = '  top: "pool' + str(next_conv + 1) + '"'

                    if line == '  bottom: "Convolution' + str(next_conv +
                                                              1) + '"':
                        line_ = '  bottom: "conv' + str(next_conv + 1) + '"'
                    if line == '  bottom: "Pooling' + str(next_conv + 1) + '"':
                        line_ = '  bottom: "pool' + str(next_conv + 1) + '"'

                if line == '  top: "conv_"':
                    line_ = '  top: "conv' + str(
                        module_options['num_conv_layers']) + '"'
                if line == '  top: "pool_"':
                    line_ = '  top: "pool' + str(
                        module_options['num_conv_layers']) + '"'
                if line == '  bottom: "conv_"':
                    line_ = '  bottom: "conv' + str(
                        module_options['num_conv_layers']) + '"'
                if line == '  bottom: "pool_"':
                    line_ = '  bottom: "pool' + str(
                        module_options['num_conv_layers']) + '"'

                for next_ip in range(module_options['num_fc_layers'] - 1):

                    if line == '  top: "InnerProduct' + str(next_ip + 1) + '"':
                        line_ = '  top: "ip' + str(next_ip + 1) + '"'
                    if line == '  bottom: "InnerProduct' + str(next_ip +
                                                               1) + '"':
                        line_ = '  bottom: "ip' + str(next_ip + 1) + '"'

                if line == '  top: "ip_"':
                    line_ = '  top: "ip' + str(
                        module_options['num_fc_layers']) + '"'
                if line == '  top: "ip_last"':
                    line_ = '  top: "ip' + str(
                        module_options['num_fc_layers'] + 1) + '"'

                if line == '  bottom: "ip_"':
                    line_ = '  bottom: "ip' + str(
                        module_options['num_fc_layers']) + '"'
                if line == '  bottom: "ip_last"':
                    line_ = '  bottom: "ip' + str(
                        module_options['num_fc_layers'] + 1) + '"'
                if line == '  name: "ip_last"':
                    line_ = '  name: "ip' + str(
                        module_options['num_fc_layers'] + 1) + '"'

                f.write(line_ + "\n")
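
A hypothetical invocation of cnn_to_proto_loop, with minimal option dicts covering exactly the keys it reads (batch sizes, sources and the output path are placeholders):

module_options = {
    'num_conv_layers': 1,
    'num_fc_layers': 1,
    'conv1': {'kernel_size': 5, 'stride': 1, 'num_output': 20},
    'pool1': {'kernel_size': 2, 'stride': 2},
    'ip1': {'num_output': 500},
}
sample_ranges = {
    'train_batch_size_mnist': 64,
    'test_batch_size_mnist': 100,
    'train_source_mnist': 'mnist_train_lmdb',
    'test_source_mnist': 'mnist_test_lmdb',
}
cnn_to_proto_loop(module_options, sample_ranges, 'mnist', 'lenet_auto.prototxt')

With these options the generated file keeps only the hand-written Input stanza as its data definition and renames the generated tops to conv1, pool1, ip1 and ip2.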