Example #1
def caffenet(lmdb, batch_size=256, include_acc=False):
    data, label = L.Data(source=lmdb, backend=P.Data.LMDB, batch_size=batch_size, ntop=2,
        transform_param=dict(crop_size=227, mean_value=[104, 117, 123], mirror=True))

    # the net itself
    conv1, relu1 = conv_relu(data, 11, 96, stride=4)
    pool1 = max_pool(relu1, 3, stride=2)
    norm1 = L.LRN(pool1, local_size=5, alpha=1e-4, beta=0.75)
    conv2, relu2 = conv_relu(norm1, 5, 256, pad=2, group=2)
    pool2 = max_pool(relu2, 3, stride=2)
    norm2 = L.LRN(pool2, local_size=5, alpha=1e-4, beta=0.75)
    conv3, relu3 = conv_relu(norm2, 3, 384, pad=1)
    conv4, relu4 = conv_relu(relu3, 3, 384, pad=1, group=2)
    conv5, relu5 = conv_relu(relu4, 3, 256, pad=1, group=2)
    pool5 = max_pool(relu5, 3, stride=2)
    fc6, relu6 = fc_relu(pool5, 4096)
    drop6 = L.Dropout(relu6, in_place=True)
    fc7, relu7 = fc_relu(drop6, 4096)
    drop7 = L.Dropout(relu7, in_place=True)
    fc8 = L.InnerProduct(drop7, num_output=1000)
    loss = L.SoftmaxWithLoss(fc8, label)

    if include_acc:
        acc = L.Accuracy(fc8, label)
        return to_proto(loss, acc)
    else:
        return to_proto(loss)
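The conv_relu, fc_relu, and max_pool helpers are not part of this snippet; a minimal sketch consistent with the call sites above (they match the helpers in Caffe's examples/pycaffe/caffenet.py) would be:

from caffe import layers as L, params as P

def conv_relu(bottom, ks, nout, stride=1, pad=0, group=1):
    # convolution followed by an in-place ReLU
    conv = L.Convolution(bottom, kernel_size=ks, stride=stride,
                         num_output=nout, pad=pad, group=group)
    return conv, L.ReLU(conv, in_place=True)

def fc_relu(bottom, nout):
    # fully connected layer followed by an in-place ReLU
    fc = L.InnerProduct(bottom, num_output=nout)
    return fc, L.ReLU(fc, in_place=True)

def max_pool(bottom, ks, stride=1):
    return L.Pooling(bottom, pool=P.Pooling.MAX, kernel_size=ks, stride=stride)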
Example #2
def Lenet(img_list, batch_size, include_acc=False):
    # First layer: data layer, reading input in ImageData format
    data, label = L.ImageData(source=img_list, batch_size=batch_size, ntop=2, root_folder=root,
                              transform_param=dict(scale=0.00390625))
    # Second layer: convolution layer
    conv1 = L.Convolution(data, kernel_size=5, stride=1, num_output=20, pad=0, weight_filler=dict(type='xavier'))
    # Pooling layer
    pool1 = L.Pooling(conv1, pool=P.Pooling.MAX, kernel_size=2, stride=2)
    # Convolution layer
    conv2 = L.Convolution(pool1, kernel_size=5, stride=1, num_output=50, pad=0, weight_filler=dict(type='xavier'))
    # Pooling layer
    pool2 = L.Pooling(conv2, pool=P.Pooling.MAX, kernel_size=2, stride=2)
    # Fully connected layer
    fc3 = L.InnerProduct(pool2, num_output=500, weight_filler=dict(type='xavier'))
    # Activation layer
    relu3 = L.ReLU(fc3, in_place=True)
    # Fully connected layer
    fc4 = L.InnerProduct(relu3, num_output=10, weight_filler=dict(type='xavier'))
    # Softmax loss layer
    loss = L.SoftmaxWithLoss(fc4, label)

    if include_acc:  # the test phase needs an accuracy layer
        acc = L.Accuracy(fc4, label)
        return to_proto(loss, acc)
    else:
        return to_proto(loss)
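to_proto() returns a NetParameter message, so a caller typically just serializes it to text. A minimal driver (file names are assumed; the `root` image-folder global used by the data layer must be set elsewhere) would be:

if __name__ == '__main__':
    # write train and test variants of the generated net to disk
    with open('lenet_train.prototxt', 'w') as f:
        f.write(str(Lenet('train_list.txt', batch_size=64)))
    with open('lenet_test.prototxt', 'w') as f:
        f.write(str(Lenet('test_list.txt', batch_size=100, include_acc=True)))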
Example #3
def caffenet(lmdb, batch_size=256, include_acc=False):
    data, label = L.Data(source=lmdb, backend=P.Data.LMDB, batch_size=batch_size, ntop=2,
        transform_param=dict(crop_size=227, mean_value=[104, 117, 123], mirror=True))

    # the net itself
    # 3x3 convolution over adjacent input pixels, producing 8 feature maps
    conv1, relu1 = conv_relu(data, 3, 8)
    # pools over relu1, the rectified convolutions
    pool1 = max_pool(relu1, 2)
    #norm1 = L.LRN(pool1, local_size=5, alpha=1e-4, beta=0.75)
    #conv2, relu2 = conv_relu(norm1, 5, 256, pad=2, group=2)
    #pool2 = max_pool(relu2, 3, stride=2)
    #norm2 = L.LRN(pool2, local_size=5, alpha=1e-4, beta=0.75)
    #conv3, relu3 = conv_relu(norm2, 3, 384, pad=1)
    #conv4, relu4 = conv_relu(relu3, 3, 384, pad=1, group=2)
    #conv5, relu5 = conv_relu(relu4, 3, 256, pad=1, group=2)
    #pool5 = max_pool(relu5, 3, stride=2)
    fc6, relu6 = fc_relu(pool1, 64)
    drop6 = L.Dropout(relu6, in_place=True)
    #fc7, relu7 = fc_relu(drop6, 4096)
    #drop7 = L.Dropout(relu7, in_place=True)
    fc8 = L.InnerProduct(drop6, num_output=64)
    loss = L.SoftmaxWithLoss(fc8, label)

    if include_acc:
        acc = L.Accuracy(fc8, label)
        return to_proto(loss, acc)
    else:
        return to_proto(loss)
Example #4
def stacked_hourglass_network(batch_size,
                              img_size,
                              nfeats,
                              multi,
                              out_dim,
                              include_acc=False):
    data, label = L.MemoryData(batch_size=batch_size,
                               channels=3,
                               height=img_size,
                               width=img_size,
                               ntop=2,
                               include=dict(phase=0))
    data = L.Input()  # note: this replaces the MemoryData top as the conv input; only `label` from the layer above is still used
    conv1 = conv_bn_relu(data, kernel_size=3, num_output=32, stride=2, pad=1)
    r1 = residual_mobile(conv1, num_output=32, multi=2, num_input=32)
    pool1 = L.Pooling(r1, pool=P.Pooling.MAX, stride=2, kernel_size=2)
    r3 = residual_mobile(pool1, num_output=nfeats, multi=multi, num_input=32)
    #
    hg = hourglass_mobile(r3,
                          num_output=nfeats,
                          num_modual=4,
                          multi=multi,
                          num_input=nfeats)
    hgr = residual_mobile(hg, num_output=nfeats, multi=multi, num_input=nfeats)
    ll = conv_bn_relu(hgr, kernel_size=1, num_output=nfeats, stride=1, pad=0)
    out = deconv(ll, num_output=out_dim, kernel_size=4, stride=2, pad=1)

    loss = L.SigmoidCrossEntropyLoss(out, label)
    if include_acc:
        acc = L.Accuracy(out, label, include=dict(phase=1))
        return to_proto(loss, acc)
    else:
        return to_proto(loss)
Example #5
def My_layer(source_dir, batch_size, target_size):
    # param_str must carry the actual argument values, not their names;
    # build it from a dict so the Python layer can literal_eval it back
    param_str = str({'source_dir': source_dir, 'batch_size': batch_size,
                     'target_size': target_size})
    mylayer = L.Python(module='MyPythonLayer',
                       layer='myPythonLayer',
                       param_str=param_str)
    print(mylayer)
    return to_proto(mylayer)
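On the layer side, Caffe hands the string above to the layer as self.param_str. A skeletal myPythonLayer (assumed here, since the real module is not shown) would parse it like this:

import ast
import caffe

class myPythonLayer(caffe.Layer):
    def setup(self, bottom, top):
        # param_str arrives verbatim; literal_eval turns the dict text back into a dict
        params = ast.literal_eval(self.param_str)
        self.source_dir = params['source_dir']
        self.batch_size = params['batch_size']
        self.target_size = params['target_size']

    def reshape(self, bottom, top):
        pass

    def forward(self, bottom, top):
        pass

    def backward(self, top, propagate_down, bottom):
        pass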
Example #6
File: mnist.py Project: xuwenying/mnist
def Lenet(img_list, batch_size, include_acc=False):
    # First layer: data layer, reading input in ImageData format
    data, label = L.ImageData(source=img_list, batch_size=batch_size, ntop=2, root_folder=root,
        transform_param=dict(scale=0.00390625))
    # Second layer: convolution layer
    conv1 = L.Convolution(data, kernel_size=5, stride=1, num_output=20, pad=0, weight_filler=dict(type='xavier'))
    # Pooling layer
    pool1 = L.Pooling(conv1, pool=P.Pooling.MAX, kernel_size=2, stride=2)
    # Convolution layer
    conv2 = L.Convolution(pool1, kernel_size=5, stride=1, num_output=50, pad=0, weight_filler=dict(type='xavier'))
    # Pooling layer
    pool2 = L.Pooling(conv2, pool=P.Pooling.MAX, kernel_size=2, stride=2)
    # Fully connected layer
    fc3 = L.InnerProduct(pool2, num_output=500, weight_filler=dict(type='xavier'))
    # Activation layer
    relu3 = L.ReLU(fc3, in_place=True)
    # Fully connected layer
    fc4 = L.InnerProduct(relu3, num_output=10, weight_filler=dict(type='xavier'))
    # Softmax loss layer
    loss = L.SoftmaxWithLoss(fc4, label)

    if include_acc:  # the test phase needs an accuracy layer
        acc = L.Accuracy(fc4, label)
        return to_proto(loss, acc)
    else:
        return to_proto(loss)
Example #7
def alexnet_ava_finetuning(hdf5, batch_size = 256, include_acc = False):
	data, label = L.HDF5Data(source = hdf5, batch_size = batch_size, ntop = 2) # no transform_param in HDF5.

	conv1, relu1 = conv_relu(data, 11, 96, stride = 4)
	norm1 = L.LRN(conv1, local_size = 5, alpha = 1e-4, beta = 0.75)
	pool1 = max_pool(norm1, 3, stride = 2)
	conv2, relu2 = conv_relu(pool1, 5, 256, pad = 2, group = 2)
	norm2 = L.LRN(conv2, local_size = 5, alpha = 1e-4, beta = 0.75)
	pool2 = max_pool(norm2, 3, stride = 2)
	conv3, relu3 = conv_relu(pool2, 3, 384, pad = 1)
	conv4, relu4 = conv_relu(relu3, 3, 384, pad = 1, group = 2)
	conv5, relu5 = conv_relu(relu4, 3, 256, pad = 1, group = 2)
	pool5 = max_pool(relu5, 3, stride = 2)
	fc6, relu6 = fc_relu(pool5, 4096)
	drop6 = L.Dropout(relu6, in_place = True)
	fc7, relu7 = fc_relu(drop6, 4096)
	drop7 = L.Dropout(relu7, in_place = True)
	fc8_f = L.InnerProduct(drop7, num_output = 10)
	
	loss = L.SigmoidCrossEntropyLoss(fc8_f, label)  # the loss function can be changed to whatever you need

	if include_acc:
		acc = L.Accuracy(fc8_f, label)
		return to_proto(loss, acc)
	else:
		return to_proto(loss)
Example #8
def densenet(net_cfg=None,
             data_cfg=None,
             batch_size=None,
             mode='train',
             datafile=None):
    if mode == 'deploy':
        data = L.Input(name='data',
                       ntop=1,
                       shape=dict(dim=data_cfg['imgsize']))
    else:
        data, label = L.Data(name='data',
                             source=datafile,
                             backend=P.Data.LMDB,
                             batch_size=batch_size,
                             ntop=2,
                             transform_param=dict(mirror=True,
                                                  crop_size=32,
                                                  mean_value=[0, 0, 0],
                                                  scale=1))

    if data_cfg['Dataset'] == 'IMAGENET':
        # 7x7 convolution followed by 3x3 max pooling
        conv1 = conv_bn_scale_relu(data, 7, 64, stride=2, pad=3)
        pool1 = L.Pooling(conv1, pool=P.Pooling.MAX, kernel_size=3, stride=2)
        _out = pool1

    if data_cfg['Dataset'] == 'CIFAR':
        # 3x3 convolution
        # conv1 = L.Convolution(data, kernel_size=3, stride=1, num_output=nchannels,
        #                     pad=1, bias_term=False, weight_filler=dict(type='msra'), bias_filler=dict(type='constant'))
        conv1 = conv_bn_scale_relu(data, 3, 24, stride=1, pad=1)
        _out = conv1

    # Build the denseblock
    dense_block_id = 0
    for i, item in enumerate(net_cfg):
        layer_type = item['Type']
        if layer_type == 'DenseBlock':
            _out = dense_block(_out, item['conv1Num'], item['conv3Num'],
                               dense_block_id, item['layerNum'],
                               item['dropout'])
            dense_block_id += 1
        elif layer_type == 'Transition':
            _out = transition_layer(_out, item['conv1Num'])
        elif layer_type == 'Classification':
            _out = classfication_layer(_out, item['OutputNum'])
        else:
            raise ValueError('layer_type not supported: ' + item['Type'])

    # Connect the fully connected output to the softmax classifier
    fc = _out
    if mode == 'deploy':
        prob = L.Softmax(fc, name='prob')
        return to_proto(prob)
    else:
        loss = L.SoftmaxWithLoss(fc, label)
        acc = L.Accuracy(fc, label)
        return to_proto(loss, acc)
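For reference, net_cfg is just a list of dicts carrying exactly the keys the loop above reads, and data_cfg supplies the dataset name and deploy input shape. A hypothetical CIFAR-style configuration might be:

data_cfg = {'Dataset': 'CIFAR', 'imgsize': [1, 3, 32, 32]}
net_cfg = [
    {'Type': 'DenseBlock', 'conv1Num': 48, 'conv3Num': 12, 'layerNum': 6, 'dropout': 0.2},
    {'Type': 'Transition', 'conv1Num': 60},
    {'Type': 'DenseBlock', 'conv1Num': 48, 'conv3Num': 12, 'layerNum': 6, 'dropout': 0.2},
    {'Type': 'Classification', 'OutputNum': 10},
]
proto = densenet(net_cfg=net_cfg, data_cfg=data_cfg, batch_size=64,
                 mode='train', datafile='cifar10_train_lmdb')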
Example #9
def synthesize(pipeline,
               output_dir,
               data_shape,
               data_layer="",
               prefix="",
               qnnengine="bitserial"):
    if len(data_shape) != 4:
        raise Exception(
            "data_shape must be 4D array (batchsize, channels, h, w)")
    deployFileName = output_dir + '/%sdeploy.prototxt' % prefix
    testFileName = output_dir + '/%stest.prototxt' % prefix
    paramFileName = output_dir + '/%sweights.caffemodel' % prefix
    # create output dir if it does not exist
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # convert pipeline into synthesizable form
    pipeline = prepare_pipeline(pipeline, qnnengine)
    # generate deploy.prototxt
    model = caffe.layers.Input(shape=dict(dim=data_shape))
    # generate Caffe model from each layer
    for L in pipeline:
        model = L.codegen(model)
    # save generated model to prototxt
    with open(deployFileName, 'w') as f:
        f.write(str(caffe.to_proto(model)))
    # load generated model
    net = caffe.Net(network_file=deployFileName, phase=caffe.TEST)
    # call each layer's filler function
    for L in pipeline:
        L.paramfill(net)
    # save the resulting parameters
    net.save(paramFileName)
    # only generate test prototxt if data layer is specified
    if data_layer != "":
        # unpack the data layer as tuple
        data, label = data_layer
        # Caffe model which we'll turn into a prototxt
        model = data
        # generate Caffe model from each layer
        for L in pipeline:
            model = L.codegen(model)
        # add top-1 and top-5 accuracy layers
        model_top1 = caffe.layers.Accuracy(model, label)
        model_top5 = caffe.layers.Accuracy(model,
                                           label,
                                           accuracy_param=dict(top_k=5))
        # save generated model to prototxt
        with open(testFileName, 'w') as f:
            f.write(str(caffe.to_proto(model_top1, model_top5)))
    # return the created Caffe net object
    return net
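A hypothetical call, assuming `pipeline` is a list of layer objects that implement the codegen() and paramfill() methods used above:

net = synthesize(pipeline, output_dir='./generated',
                 data_shape=[1, 3, 32, 32], prefix='qnn-')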
Example #10
def create_net(img_list, batch_size, include_acc=True):
    # the data layer was commented out in the original, leaving `data` undefined;
    # the ImageData variant is re-enabled here so the net can actually be built
    data, label = L.ImageData(source=img_list, batch_size=batch_size, new_width=48, new_height=48, ntop=2,
                              transform_param=dict(crop_size=40, mirror=True))
    # data, label = L.MemoryData(batch_size=batch_size, channels=3, height=256, width=256, ntop=2,
    #                            include=dict(phase=0))

    conv1 = L.Convolution(data,
                          kernel_size=5,
                          stride=1,
                          num_output=16,
                          pad=2,
                          weight_filler=dict(type='xavier'))
    relu1 = L.ReLU(conv1, in_place=True)
    pool1 = L.Pooling(relu1, pool=P.Pooling.MAX, kernel_size=3, stride=2)
    conv2 = L.Convolution(pool1,
                          kernel_size=3,  # the original's 53 is inconsistent with pad=1; 3 is the likely intent
                          stride=1,
                          num_output=32,
                          pad=1,
                          weight_filler=dict(type='xavier'))
    relu2 = L.ReLU(conv2, in_place=True)
    pool2 = L.Pooling(relu2, pool=P.Pooling.MAX, kernel_size=3, stride=2)
    conv3 = L.Convolution(pool2,
                          kernel_size=3,  # the original's 53 is inconsistent with pad=1; 3 is the likely intent
                          stride=1,
                          num_output=32,
                          pad=1,
                          weight_filler=dict(type='xavier'))
    relu3 = L.ReLU(conv3, in_place=True)
    pool3 = L.Pooling(relu3, pool=P.Pooling.MAX, kernel_size=3, stride=2)
    fc4 = L.InnerProduct(pool3,
                         num_output=1024,
                         weight_filler=dict(type='xavier'))
    relu4 = L.ReLU(fc4, in_place=True)
    drop4 = L.Dropout(relu4, in_place=True)
    fc5 = L.InnerProduct(drop4,
                         num_output=7,
                         weight_filler=dict(type='xavier'))
    # loss = L.SoftmaxWithLoss(fc5, label)
    loss = L.SigmoidCrossEntropyLoss(fc5, label)

    if include_acc:
        acc = L.Accuracy(fc5,
                         label,
                         top_k=5,
                         name='accuracy',
                         include=dict(phase=1))
        return to_proto(loss, acc)
    else:
        return to_proto(loss)
Example #11
	def MakeNetwork(self,db,batch_size,layers,deploy,act,input_dropout,hidden_dropout,L2,filler):
		
		#Create Data layer
		data, label = L.HDF5Data(source=db,batch_size=batch_size,ntop=2)
	
		#Add hidden layers
		top = data
		if(input_dropout!=0):
			top = L.Dropout(top, in_place=True, dropout_ratio = input_dropout)
		
		for x in range(len(layers)):
			if(L2):
				if(filler==1):
					top = L.InnerProduct(top, num_output=layers[x], weight_filler=dict(type='xavier'),bias_filler=dict(type='xavier'),param=[dict(decay_mult=1)])
				elif(filler==2):
					top = L.InnerProduct(top, num_output=layers[x], weight_filler=dict(type='gaussian',std=0.01),bias_filler=dict(type='gaussian',std=0.01),param=[dict(decay_mult=1)])

			else:
				if(filler==1):
					top = L.InnerProduct(top, num_output=layers[x], weight_filler=dict(type='xavier'),bias_filler=dict(type='xavier'),param=[dict(decay_mult=0)])
				elif(filler==2):
					top = L.InnerProduct(top, num_output=layers[x], weight_filler=dict(type='gaussian',std=0.01),bias_filler=dict(type='gaussian',std=0.01),param=[dict(decay_mult=0)])

	
			if(act == 1):
				top = L.ReLU(top,in_place=True)
			elif(act == 2):
				top = L.Sigmoid(top, in_place=True)
			elif(act == 3):
				top = L.TanH(top, in_place=True)
			else:
				print "Error, invalid activation function choice "
			if(hidden_dropout!=0):
				top = L.Dropout(top, in_place=True, dropout_ratio = hidden_dropout)
	
		#Add Output Layers
		if(filler==1):
			output = L.InnerProduct(top, num_output=self._numClasses,weight_filler=dict(type='xavier'),bias_filler=dict(type='xavier'))
		elif(filler==2):
			output = L.InnerProduct(top, num_output=self._numClasses,weight_filler=dict(type='gaussian',std=0.01),bias_filler=dict(type='gaussian',std=0.01))

		if(deploy == False):
			loss = L.SoftmaxWithLoss(output,label)
			return to_proto(loss)	
		else:
			prob = L.Softmax(output)
			return to_proto(prob)
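A hypothetical call, with the integer-coded options spelled out (act: 1=ReLU, 2=Sigmoid, 3=TanH; filler: 1=xavier, 2=gaussian with std 0.01); `builder` stands in for an instance of the enclosing class:

proto = builder.MakeNetwork(db='train.h5', batch_size=64, layers=[512, 256],
                            deploy=False, act=1, input_dropout=0.2,
                            hidden_dropout=0.5, L2=True, filler=1)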
Example #12
def create_deploy():
    # The first (data) layer is omitted; 'data' is supplied as a named bottom
    conv1 = L.Convolution(bottom='data',
                          kernel_size=5,
                          stride=1,
                          num_output=20,
                          pad=0,
                          weight_filler=dict(type='xavier'))
    pool1 = L.Pooling(conv1, pool=P.Pooling.MAX, kernel_size=2, stride=2)
    conv2 = L.Convolution(pool1,
                          kernel_size=5,
                          stride=1,
                          num_output=50,
                          pad=0,
                          weight_filler=dict(type='xavier'))
    pool2 = L.Pooling(conv2, pool=P.Pooling.MAX, kernel_size=2, stride=2)
    fc3 = L.InnerProduct(pool2,
                         num_output=500,
                         weight_filler=dict(type='xavier'))
    relu3 = L.ReLU(fc3, in_place=True)
    fc4 = L.InnerProduct(relu3,
                         num_output=10,
                         weight_filler=dict(type='xavier'))
    # No accuracy layer, but has softmax layer
    prob = L.Softmax(fc4)
    return to_proto(prob)
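Because bottom='data' references a blob that no layer produces, a deploy prototxt normally prepends an input declaration before the generated text. A sketch, assuming MNIST-style 1x28x28 input:

deploy_str = ('input: "data"\n'
              'input_shape { dim: 1 dim: 1 dim: 28 dim: 28 }\n'
              + str(create_deploy()))
with open('deploy.prototxt', 'w') as f:
    f.write(deploy_str)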
Example #13
def fcn16_1(train_lmdb, val_lmdb, batch_size):
    data, label = L.Data(batch_size=batch_size, backend=P.Data.LMDB, 
                         source=train_lmdb,
                         transform_param=dict(crop_size=128,mirror=True), 
                         ntop=2,
                         include=dict(phase=getattr(caffe_pb2, 'TRAIN')))
    data, label = L.Data(batch_size=batch_size, backend=P.Data.LMDB, 
                         source=val_lmdb,
                         transform_param=dict(crop_size=128), 
                         ntop=2,
                         include=dict(phase=getattr(caffe_pb2, 'TEST')))
    conv1 = conv_block(data,3,16,stride=1,pad=1)
    pool1 = max_pool(conv1,2)
    conv2 = conv_block(pool1,3,32,stride=1,pad=1)
    pool2 = max_pool(conv2,2)
    conv3 = conv_block(pool2,2,64,stride=1,pad=1)
    pool3 = max_pool(conv3,2)
    conv4 = conv15x15_block(pool3,128)
    fc0 = conv_block(conv4,1,128,stride=1,pad=0)
    fc1 = L.Convolution(fc0, kernel_size=1, num_output=4,stride=1,pad=0,
                        weight_filler=dict(type='xavier'))
    upsample = deconv_block(fc1,ks=16,nout=4,stride=8)
    crop = L.Crop(upsample,data,crop_param=dict(axis=2,offset=4))
    loss =  L.SoftmaxWithLoss(crop, label)
    acc = L.Accuracy(crop, label, accuracy_param=dict(top_k=1))
    return caffe.to_proto(loss,acc)
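Some of the helper blocks used by the FCN examples are defined elsewhere in the project; hypothetical definitions, inferred from their call sites, could look like this:

def conv_block(bottom, ks, nout, stride=1, pad=0):
    # convolution followed by an in-place ReLU
    conv = L.Convolution(bottom, kernel_size=ks, num_output=nout, stride=stride,
                         pad=pad, weight_filler=dict(type='xavier'))
    return L.ReLU(conv, in_place=True)

def deconv_block(bottom, ks, nout, stride=2):
    # learned upsampling via deconvolution; params go through convolution_param
    return L.Deconvolution(bottom, convolution_param=dict(kernel_size=ks,
                           num_output=nout, stride=stride, bias_term=False))

def eltwise_sum(a, b):
    return L.Eltwise(a, b, operation=P.Eltwise.SUM)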
Example #14
def resnet(train_lmdb, test_lmdb, batch_size=256, stages=[2, 2, 2, 2], input_size=128, first_output=32, include_acc=False):
    # this code can't recognize the include phase yet, so there will only be a TEST-phase data layer
    data, label = L.Data(source=train_lmdb, backend=P.Data.LMDB, batch_size=batch_size, ntop=2,
        transform_param=dict(crop_size=227, mean_value=[104, 117, 123], mirror=True),
        include=dict(phase=getattr(caffe_pb2, 'TRAIN')))
    data, label = L.Data(source=test_lmdb, backend=P.Data.LMDB, batch_size=batch_size, ntop=2,
        transform_param=dict(crop_size=227, mean_value=[104, 117, 123], mirror=True),
        include=dict(phase=getattr(caffe_pb2, 'TEST')))
    data, label = L.MemoryData(batch_size=batch_size, height=input_size, width=input_size, channels=3, ntop=2,
        transform_param=dict(mean_value=[104, 117, 123], mirror=True),
        include=dict(phase=getattr(caffe_pb2, 'TEST')))

    # the net itself
    relu1 = conv_factory_relu(data, 3, first_output, stride=1, pad=1)
    relu2 = conv_factory_relu(relu1, 3, first_output, stride=1, pad=1)
    residual = max_pool(relu2, 3, stride=2)
    
    for i in stages[1:]:
        first_output *= 2
        for j in range(i):
            if j==0:
                if i==0:
                    residual = residual_factory_proj(residual, first_output, 1)
                else:
                    residual = residual_factory_proj(residual, first_output, 2)
            else:
                residual = residual_factory1(residual, first_output)

    glb_pool = L.Pooling(residual, pool=P.Pooling.AVE, global_pooling=True)
    fc = L.InnerProduct(glb_pool, num_output=1000)
    loss = L.SoftmaxWithLoss(fc, label)
    acc = L.Accuracy(fc, label, include=dict(phase=getattr(caffe_pb2, 'TEST')))
    return to_proto(loss, acc)
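The limitation noted in the comment is that each reassignment of `data` orphans the previous data layer: to_proto() walks the graph upward from the loss, so only the last data layer it reaches is emitted. A common workaround, sketched here as an assumption rather than taken from the original project, is to serialize the extra phase's data layer separately and prepend its text:

from caffe import layers as L, params as P, to_proto
from caffe.proto import caffe_pb2

def phase_data(lmdb, phase_name, batch_size=256):
    # build a single-phase data layer and serialize it on its own
    data, label = L.Data(source=lmdb, backend=P.Data.LMDB, batch_size=batch_size,
                         ntop=2, include=dict(phase=getattr(caffe_pb2, phase_name)))
    return str(to_proto(data, label))

# prepend the TRAIN-phase data layer to the body generated from the TEST layer
full_prototxt = phase_data('train_lmdb', 'TRAIN') + str(resnet('train_lmdb', 'test_lmdb'))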
Example #15
def normnet(train_lmdb, test_lmdb, batch_size=256, stages=[2, 2, 2, 2], input_size=32, first_output=32, include_acc=False):
    # this code can't recognize the include phase yet, so there will only be a TEST-phase data layer
    data, label = L.Data(source=train_lmdb, backend=P.Data.LMDB, batch_size=batch_size, ntop=2,
        transform_param=dict(crop_size=227, mean_value=[104, 117, 123], mirror=True),
        include=dict(phase=getattr(caffe_pb2, 'TRAIN')))
    data, label = L.Data(source=test_lmdb, backend=P.Data.LMDB, batch_size=batch_size, ntop=2,
        transform_param=dict(crop_size=227, mean_value=[104, 117, 123], mirror=True),
        include=dict(phase=getattr(caffe_pb2, 'TEST')))
    data, label = L.MemoryData(batch_size=batch_size, height=input_size, width=input_size, channels=3, ntop=2,
        transform_param=dict(mean_value=[104, 117, 123], mirror=True),
        include=dict(phase=getattr(caffe_pb2, 'TEST')))

    # the net itself
    
    conv1 = conv_factory(data, 3, 96, 1, 1)
    in3a = SimpleFactory(conv1, 32, 32)
    in3b = SimpleFactory(in3a, 32, 48)
    in3c = DownsampleFactory(in3b, 80)
    in4a = SimpleFactory(in3c, 112, 48)
    in4b = SimpleFactory(in4a, 96, 64)
    in4c = SimpleFactory(in4b, 80, 80)
    in4d = SimpleFactory(in4c, 48, 96)
#    for i in range(25):
#        in4d = SimpleFactory(in4d, 48, 96)
    in4e = DownsampleFactory(in4d, 96)
    in5a = SimpleFactory(in4e, 176, 160)
    in5b = SimpleFactory(in5a, 176, 160)

    pool = avg_pool(in5b, 8)

    fc = L.InnerProduct(pool, num_output=10, weight_filler=dict(type='xavier'))
    loss = L.SoftmaxWithLoss(fc, label)
    acc = L.Accuracy(fc, label, include=dict(phase=getattr(caffe_pb2, 'TEST')))
    return to_proto(loss, acc)
Example #16
def resnet_cifar(depth=16, widen_factor=1, classes=10):
    assert (depth-4) % 6 == 0
    n = (depth - 4) // 6  # integer division: depth must equal 6n + 4

    data, label = L.Data(ntop=2, include=dict(phase=getattr(caffe_pb2, 'TEST')))
    residual = conv_factory(None, 3, 16, 1, 1)

    residual = residual_block(residual, 16*widen_factor, 1, True)
    for i in range(n - 1):
        residual = residual_block(residual, 16*widen_factor)

    residual = residual_block(residual, 32*widen_factor, 2, True)
    for i in range(n - 1):
        residual = residual_block(residual, 32*widen_factor)

    residual = residual_block(residual, 64*widen_factor, 2, True)
    for i in range(n - 1):
        residual = residual_block(residual, 64*widen_factor)

    global_pool = L.Pooling(residual, pool=P.Pooling.AVE, global_pooling=True)
    fc = L.InnerProduct(global_pool,num_output=classes,
                        bias_filler=dict(type='constant', value=0), weight_filler=dict(type='msra'))
    loss = L.SoftmaxWithLoss(fc, label)
    acc = L.Accuracy(fc, label, include=dict(phase=getattr(caffe_pb2, 'TEST')))
    return to_proto(loss, acc)
Example #17
def create_deploy():
    # The first (data) layer is omitted; 'data' is supplied as a named bottom
    conv1 = L.Convolution(bottom="data", kernel_size=5, stride=1, num_output=20, pad=4, weight_filler=dict(type='xavier'))
    # Activation layer
    relu1 = L.ReLU(conv1, in_place=True)
    # Convolution layer
    conv2 = L.Convolution(relu1, kernel_size=5, stride=1, num_output=50, pad=4, weight_filler=dict(type='xavier'))
    # Activation layer
    relu2 = L.ReLU(conv2, in_place=True)
    # Pooling layer
    pool1 = L.Pooling(relu2, pool=P.Pooling.MAX, kernel_size=2, stride=2)
    # Convolution layer
    conv3 = L.Convolution(pool1, kernel_size=5, stride=1, num_output=20, pad=4, weight_filler=dict(type='xavier'))
    # Activation layer
    relu3 = L.ReLU(conv3, in_place=True)
    # Convolution layer
    conv4 = L.Convolution(relu3, kernel_size=5, stride=1, num_output=50, pad=4, weight_filler=dict(type='xavier'))
    # Activation layer
    relu4 = L.ReLU(conv4, in_place=True)
    # Fully connected layer
    fc3 = L.InnerProduct(relu4, num_output=500, weight_filler=dict(type='xavier'))
    # Activation layer (renamed from a second `relu3` to avoid shadowing)
    relu5 = L.ReLU(fc3, in_place=True)
    # Fully connected layer
    fc4 = L.InnerProduct(relu5, num_output=10, weight_filler=dict(type='xavier'))
    # No accuracy layer at the end, but there is a Softmax layer
    prob = L.Softmax(fc4)
    return to_proto(prob)
Example #18
def fcn32(train_lmdb, val_lmdb, batch_size):
    data, label = L.Data(batch_size=batch_size, backend=P.Data.LMDB, 
                         source=train_lmdb,
                         transform_param=dict(crop_size=128,mirror=True), 
                         ntop=2,
                         include=dict(phase=getattr(caffe_pb2, 'TRAIN')))
    data, label = L.Data(batch_size=batch_size, backend=P.Data.LMDB, 
                         source=val_lmdb,
                         transform_param=dict(crop_size=128), 
                         ntop=2,
                         include=dict(phase=getattr(caffe_pb2, 'TEST')))
    conv1 = conv_block(data,3,16,stride=2,pad=1)
    pool1 = max_pool(conv1,2)
    conv2 = conv_block(pool1,1,64,stride=1,pad=0)
    conv3 = conv_block(conv1,2,16,stride=2,pad=0)
    conv4 = conv_block(conv3,3,16,stride=1,pad=1)
    conv5 = conv_block(conv4,1,64,stride=1,pad=0)
    sum0 = eltwise_sum(conv2,conv5)
    conv6 = conv31x31_block(sum0,128)    
    fc0 = conv_block(conv6,1,128,stride=1,pad=0)
    fc1 = L.Convolution(fc0, kernel_size=1, num_output=4,stride=1,pad=0)
    upsample = deconv_block(fc1,8,4,stride=4)
    crop = L.Crop(upsample,data,crop_param=dict(axis=2,offset=2))
    loss =  L.SoftmaxWithLoss(crop, label)
    acc = L.Accuracy(crop, label, accuracy_param=dict(top_k=1))

    return caffe.to_proto(loss,acc)
Example #19
def wrn_ilsvrc(train_lmdb, test_lmdb, batch_size=16, stages=[64, 128, 160, 320, 640]):
    # there will only be a TEST-phase data layer; the TRAIN layer needs to be added manually
    data, label = L.Data(source=train_lmdb, backend=P.Data.LMDB, batch_size=batch_size, ntop=2, transform_param=dict(crop_size=151, mean_value=[104, 117, 123], mirror=True), include=dict(phase=getattr(caffe_pb2, 'TRAIN')))
    data, label = L.Data(source=test_lmdb, backend=P.Data.LMDB, batch_size=batch_size, ntop=2, transform_param=dict(crop_size=151, mean_value=[104, 117, 123], mirror=True), include=dict(phase=getattr(caffe_pb2, 'TEST')))

    #stem
    conv1 = L.Convolution(data, kernel_size=3, stride=2, num_output=stages[0], pad=0, weight_filler=dict(type='msra')) # 151 x 151 -> 75 x 75 - 64
    bn_relu_1 = batchnorm_scale_relu(conv1)

    #WRN
    conv2 = L.Convolution(bn_relu_1, kernel_size=3, stride=2, num_output=stages[1], pad=0, weight_filler=dict(type='msra')) #75 x 75 -> 37 x 37 - 128

    bn_relu_2 = batchnorm_scale_relu(conv2)
    stage1_expansion = wrn_expansion(bn_relu_2, 3, stages[2],stride_first_conv=1, pad_first_conv=1) # 37 x 37 -> 37 x 37 - 160
    stage1_resblock_1 = wrn_no_expansion(stage1_expansion, 3, stages[2])
    stage1_resblock_2 = wrn_no_expansion(stage1_resblock_1, 3, stages[2])
    stage1_resblock_3 = wrn_no_expansion(stage1_resblock_2, 3, stages[2])

    stage2_expansion = wrn_expansion(stage1_resblock_3, 3, stages[3], stride_first_conv=2, pad_first_conv=1) # 37 x 37 -> 19 x 19 - 320
    stage2_resblock_1 = wrn_no_expansion(stage2_expansion, 3, stages[3])
    stage2_resblock_2 = wrn_no_expansion(stage2_resblock_1, 3, stages[3])
    stage2_resblock_3 = wrn_no_expansion(stage2_resblock_2, 3, stages[3])
    
    stage3_expansion = wrn_expansion(stage2_resblock_3, 3, stages[4], stride_first_conv=2, pad_first_conv=1) # 19 x 19 -> 10 x 10 - 640
    stage3_resblock_1 = wrn_no_expansion(stage3_expansion, 3, stages[4])
    stage3_resblock_2 = wrn_no_expansion(stage3_resblock_1, 3, stages[4])
    stage3_resblock_3 = wrn_no_expansion(stage3_resblock_2, 3, stages[4])

    #glb_pool = L.Pooling(stage5_resblock_2, kernel_size=4, stride=4, pool=P.Pooling.AVE, global_pooling=True);
    glb_pool = L.Pooling(stage3_resblock_3, kernel_size=5, stride=5, pool=P.Pooling.AVE)  # 10 x 10 -> 2 x 2
    fc = L.InnerProduct(glb_pool, num_output=1000)
    loss = L.SoftmaxWithLoss(fc, label)
    acc = L.Accuracy(fc, label, include=dict(phase=getattr(caffe_pb2, 'TEST')))
    return to_proto(loss, acc)
Example #20
def resnet(leveldb, batch_size=128, stages=[2, 2, 2, 2], first_output=16, real=True):
    # `real` was an undefined global in the original; it is surfaced here as a
    # parameter that selects between the two residual-block variants used below
    feature_size = 32
    data, label = L.Data(source=leveldb, backend=P.Data.LEVELDB, batch_size=batch_size, ntop=2,
        transform_param=dict(crop_size=feature_size, mirror=True))
    residual = conv_factory_relu(data, 3, first_output, stride=1, pad=1)
    
    st = 0
    for i in stages[1:]:
        st += 1
        for j in range(i):
            if j==i-1:
                first_output *= 2
                feature_size //= 2  # keep the feature size an integer
                if i==0:#never called
                    residual = residual_factory_proj(residual, first_output, 1)

                # bottleneck layer, but not at the last stage
                elif st != 3:
                    if real:
                        residual = residual_factory_padding1(residual, num_filter=first_output, stride=2, 
                            batch_size=batch_size, feature_size=feature_size)
                    else:
                        residual = residual_factory_padding2(residual, num_filter=first_output, stride=2, 
                            batch_size=batch_size, feature_size=feature_size)
            else:
                if real:
                    residual = residual_factory1(residual, first_output)
                else:
                    residual = residual_factory2(residual, first_output)


    glb_pool = L.Pooling(residual, pool=P.Pooling.AVE, global_pooling=True)
    fc = L.InnerProduct(glb_pool, num_output=10,bias_term=True, weight_filler=dict(type='msra'))
    loss = L.SoftmaxWithLoss(fc, label)
    return to_proto(loss)
Example #21
def fcn16_enet(train_lmdb, val_lmdb, batch_size):
    data, label = L.Data(batch_size=batch_size, backend=P.Data.LMDB, 
                         source=train_lmdb,
                         transform_param=dict(crop_size=128,mirror=True), 
                         ntop=2,
                         include=dict(phase=getattr(caffe_pb2, 'TRAIN')))
    data, label = L.Data(batch_size=batch_size, backend=P.Data.LMDB, 
                         source=val_lmdb,
                         transform_param=dict(crop_size=128), 
                         ntop=2,
                         include=dict(phase=getattr(caffe_pb2, 'TEST')))
    conv1 = conv_block(data,3,16,stride=2,pad=1)
    sum0 = bottleneck2_block(conv1,16)
    sum1 = bottleneck2_block(sum0, 32) 
    bn0 = bottleneck3x3_block(sum1,32)
    bn1 = bottleneck3x3_block(bn0,32,dilation=2)
    bn2 = bottleneck5x5_block(bn1,32)
    bn3 = bottleneck3x3_block(bn2,32,dilation=4)
    fc0 = L.Convolution(bn3, kernel_size=1, num_output=4,stride=1,pad=0,
                        weight_filler=dict(type='xavier'))
    upsample = deconv_block(fc0,ks=16,nout=4,stride=8)
    crop = L.Crop(upsample,data,crop_param=dict(axis=2,offset=4))
    loss =  L.SoftmaxWithLoss(crop, label)
    acc = L.Accuracy(crop, label, accuracy_param=dict(top_k=1))
    return caffe.to_proto(loss,acc)
Example #22
def create_deploy():
    # The first (data) layer is omitted; 'data' is supplied as a named bottom
    conv1 = L.Convolution(bottom='data',
                          kernel_size=5,
                          stride=1,
                          num_output=20,
                          pad=0,
                          weight_filler=dict(type='xavier'))
    pool1 = L.Pooling(conv1, pool=P.Pooling.MAX, kernel_size=2, stride=2)
    conv2 = L.Convolution(pool1,
                          kernel_size=5,
                          stride=1,
                          num_output=50,
                          pad=0,
                          weight_filler=dict(type='xavier'))
    pool2 = L.Pooling(conv2, pool=P.Pooling.MAX, kernel_size=2, stride=2)
    fc3 = L.InnerProduct(pool2,
                         num_output=500,
                         weight_filler=dict(type='xavier'))
    relu3 = L.ReLU(fc3, in_place=True)
    fc4 = L.InnerProduct(relu3,
                         num_output=10,
                         weight_filler=dict(type='xavier'))
    # No accuracy layer at the end, but there is a Softmax layer
    prob = L.Softmax(fc4)
    return to_proto(prob)
Example #23
def resnet_cifar(depth=16, widen_factor=1, classes=10):
    assert (depth - 4) % 6 == 0
    n = (depth - 4) // 6  # integer division: depth must equal 6n + 4

    data, label = L.Data(ntop=2,
                         include=dict(phase=getattr(caffe_pb2, 'TEST')))
    residual = conv_factory(None, 3, 16, 1, 1)

    residual = residual_block(residual, 16 * widen_factor, 1, True)
    for i in range(n - 1):
        residual = residual_block(residual, 16 * widen_factor)

    residual = residual_block(residual, 32 * widen_factor, 2, True)
    for i in range(n - 1):
        residual = residual_block(residual, 32 * widen_factor)

    residual = residual_block(residual, 64 * widen_factor, 2, True)
    for i in range(n - 1):
        residual = residual_block(residual, 64 * widen_factor)

    global_pool = L.Pooling(residual, pool=P.Pooling.AVE, global_pooling=True)
    fc = L.InnerProduct(global_pool,
                        num_output=classes,
                        bias_filler=dict(type='constant', value=0),
                        weight_filler=dict(type='msra'))
    loss = L.SoftmaxWithLoss(fc, label)
    acc = L.Accuracy(fc, label, include=dict(phase=getattr(caffe_pb2, 'TEST')))
    return to_proto(loss, acc)
Example #24
def mynet():
    data, label = L.DummyData(
        shape=[dict(dim=[8, 1, 28, 28]),
               dict(dim=[8, 1, 1, 1])],
        transform_param=dict(scale=1. / 255),
        ntop=2)

    # CAFFE = 1
    # MKL2017 = 3
    kwargs = {'engine': 3}
    conv1 = L.Convolution(data,
                          kernel_size=[3, 4, 5],
                          num_output=3,
                          pad=[1, 2, 3])
    bn1 = L.BatchNorm(conv1, **kwargs)
    relu1 = L.ReLU(bn1, **kwargs)

    convargs = {
        'param':
        [dict(lr_mult=1, decay_mult=1),
         dict(lr_mult=2, decay_mult=2)],
        'convolution_param':
        dict(num_output=64,
             kernel_size=2,
             stride=2,
             engine=P.Convolution.CAFFE,
             bias_filler=dict(type='constant', value=0),
             weight_filler=dict(type='xavier'))
    }
    deconv1 = L.Deconvolution(relu1, **convargs)

    return to_proto(deconv1)
Example #25
def wrn(train_lmdb, test_lmdb, batch_size=32, stages=[16, 160, 320, 640], input_size=32, first_output=32, include_acc=False):
    # the code can't recognize the include phase at the moment, so there will only be a TEST-phase data layer; a TRAIN layer needs to be added manually
    data, label = L.Data(source=train_lmdb, backend=P.Data.LMDB, batch_size=batch_size, ntop=2, transform_param=dict(crop_size=32, mean_value=[104, 117, 123], mirror=True), include=dict(phase=getattr(caffe_pb2, 'TRAIN')))
    data, label = L.Data(source=test_lmdb, backend=P.Data.LMDB, batch_size=batch_size, ntop=2, transform_param=dict(crop_size=32, mean_value=[104, 117, 123], mirror=True), include=dict(phase=getattr(caffe_pb2, 'TEST')))

    conv1 = L.Convolution(data, kernel_size=3, stride=1, num_output=stages[0], pad=1, weight_filler=dict(type='msra'))

    bn_relu_1 = batchnorm_scale_relu(conv1)
    stage1_expansion = wrn_expansion(bn_relu_1, 3, stages[1], stride_first_conv=1, pad_first_conv=1)
    stage1_resblock_1 = wrn_no_expansion(stage1_expansion, 3, stages[1])
    stage1_resblock_2 = wrn_no_expansion(stage1_resblock_1, 3, stages[1])
    stage1_resblock_3 = wrn_no_expansion(stage1_resblock_2, 3, stages[1])

    stage2_expansion = wrn_expansion(stage1_resblock_3, 3, stages[2], stride_first_conv=2, pad_first_conv=1)
    stage2_resblock_1 = wrn_no_expansion(stage2_expansion, 3, stages[2])
    stage2_resblock_2 = wrn_no_expansion(stage2_resblock_1, 3, stages[2])
    stage2_resblock_3 = wrn_no_expansion(stage2_resblock_2, 3, stages[2])
    
    stage3_expansion = wrn_expansion(stage2_resblock_3, 3, stages[3], stride_first_conv=2, pad_first_conv=1)
    stage3_resblock_1 = wrn_no_expansion(stage3_expansion, 3, stages[3])
    stage3_resblock_2 = wrn_no_expansion(stage3_resblock_1, 3, stages[3])
    stage3_resblock_3 = wrn_no_expansion(stage3_resblock_2, 3, stages[3])

    glb_pool = L.Pooling(stage3_resblock_3, pool=P.Pooling.AVE, global_pooling=True)
    fc = L.InnerProduct(glb_pool, num_output=10)
    loss = L.SoftmaxWithLoss(fc, label)
    acc = L.Accuracy(fc, label, include=dict(phase=getattr(caffe_pb2, 'TEST')))
    return to_proto(loss, acc)
Example #26
def densenet(data_file, mode='train', batch_size=64, depth=40, first_output=16, growth_rate=12, dropout=0.2):
    data, label = L.Data(source=data_file, backend=P.Data.LMDB, batch_size=batch_size, ntop=2, 
              transform_param=dict(mean_file="/home/zl499/caffe/examples/cifar10/mean.binaryproto"))

    nchannels = first_output
    model = L.Convolution(data, kernel_size=3, stride=1, num_output=nchannels,
                        pad=1, bias_term=False, weight_filler=dict(type='msra'), bias_filler=dict(type='constant'))

    N = (depth - 4) // 3  # integer division: number of layers per dense block
    for i in range(N):
        model = add_layer(model, growth_rate, dropout)
        nchannels += growth_rate
    model = transition(model, nchannels, dropout)

    for i in range(N):
        model = add_layer(model, growth_rate, dropout)
        nchannels += growth_rate
    model = transition(model, nchannels, dropout)

    for i in range(N):
        model = add_layer(model, growth_rate, dropout)
        nchannels += growth_rate


    model = L.BatchNorm(model, in_place=False, param=[dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0)])
    model = L.Scale(model, bias_term=True, in_place=True, filler=dict(value=1), bias_filler=dict(value=0))
    model = L.ReLU(model, in_place=True)
    model = L.Pooling(model, pool=P.Pooling.AVE, global_pooling=True)
    model = L.InnerProduct(model, num_output=10, bias_term=True, weight_filler=dict(type='xavier'), bias_filler=dict(type='constant'))
    loss = L.SoftmaxWithLoss(model, label)
    accuracy = L.Accuracy(model, label)
    return to_proto(loss, accuracy)
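add_layer and transition are defined elsewhere in the project; a sketch consistent with the standard DenseNet Caffe recipe (an assumption, not the project's exact code) is:

def add_layer(bottom, num_filter, dropout):
    # BN -> Scale -> ReLU -> 3x3 Conv, then concatenate with the input
    bn = L.BatchNorm(bottom, in_place=False)
    scaled = L.Scale(bn, bias_term=True, in_place=True)
    relu = L.ReLU(scaled, in_place=True)
    conv = L.Convolution(relu, kernel_size=3, stride=1, num_output=num_filter,
                         pad=1, bias_term=False, weight_filler=dict(type='msra'))
    if dropout > 0:
        conv = L.Dropout(conv, dropout_ratio=dropout)
    return L.Concat(bottom, conv, axis=1)

def transition(bottom, num_filter, dropout):
    # BN -> Scale -> ReLU -> 1x1 Conv -> 2x2 average pooling
    bn = L.BatchNorm(bottom, in_place=False)
    scaled = L.Scale(bn, bias_term=True, in_place=True)
    relu = L.ReLU(scaled, in_place=True)
    conv = L.Convolution(relu, kernel_size=1, stride=1, num_output=num_filter,
                         pad=0, bias_term=False, weight_filler=dict(type='msra'))
    if dropout > 0:
        conv = L.Dropout(conv, dropout_ratio=dropout)
    return L.Pooling(conv, pool=P.Pooling.AVE, kernel_size=2, stride=2)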
Example #28
def InceptionResNetV1(train_lmdb, test_lmdb, input_size=299, batch_size=256, stages=[0, 5, 10, 5], first_output=32, include_acc=False):
    # this code can't recognize the include phase yet, so there will only be a TEST-phase data layer
    data, label = L.Data(source=train_lmdb, backend=P.Data.LMDB, batch_size=batch_size, ntop=2,
        transform_param=dict(crop_size=input_size, mean_value=[104, 117, 123], mirror=True),
        include=dict(phase=getattr(caffe_pb2, 'TRAIN')))
    data, label = L.Data(source=test_lmdb, backend=P.Data.LMDB, batch_size=batch_size, ntop=2,
        transform_param=dict(crop_size=input_size, mean_value=[104, 117, 123], mirror=True),
        include=dict(phase=getattr(caffe_pb2, 'TEST')))
    data, label = L.MemoryData(batch_size=batch_size, height=input_size, width=input_size, channels=3, ntop=2,
        transform_param=dict(mean_value=[104, 117, 123], mirror=True),
        include=dict(phase=getattr(caffe_pb2, 'TEST')))
    
    Inception_ResNet_A_input = stem(bottom=data, conv1_num=32, conv2_num=32, conv3_num=64, 
         conv4_num=80, conv5_num=192, conv6_num=256)
    for i in range(stages[1]):
        Inception_ResNet_A_input = Inception_ResNet_A(bottom=Inception_ResNet_A_input, 
                                                    bottom_size=256, num1x1=32, num3x3=32)
    
    Inception_ResNet_B_input = ReductionA(bottom=Inception_ResNet_A_input, num1x1_k=192, num3x3_l=192, num3x3_n=256, num3x3_m=384)
    
    for i in range(stages[2]):
        Inception_ResNet_B_input = Inception_ResNet_B(bottom=Inception_ResNet_B_input, bottom_size=896, num1x1=128, num7x1=128, num1x7=128)
    
    Inception_ResNet_C_input = ReductionB(bottom=Inception_ResNet_B_input, num1x1=256, num3x3=384, num3x3double=256)
    
    for i in range(stages[3]):
        Inception_ResNet_C_input = Inception_ResNet_C(bottom=Inception_ResNet_C_input, bottom_size=1792, num1x1=192, num1x3=192, num3x1=192)
    
    glb_pool = L.Pooling(Inception_ResNet_C_input, pool=P.Pooling.AVE, global_pooling=True)
    dropout = L.Dropout(glb_pool, dropout_ratio=0.2)
    fc = L.InnerProduct(dropout, num_output=1000)
    loss = L.SoftmaxWithLoss(fc, label)
    acc = L.Accuracy(fc, label, include=dict(phase=getattr(caffe_pb2, 'TEST')))
    return to_proto(loss, acc)
Example #29
def create_net(lmdb, batch_size, include_acc=False):
    # Create the first layer: the data layer. It passes two kinds of data upward: image data and the corresponding labels
    data, label = L.Data(source=lmdb,
                         backend=P.Data.LMDB,
                         batch_size=batch_size,
                         ntop=2,
                         transform_param=dict(crop_size=40,
                                              mean_file=mean_file,
                                              mirror=True))
    # Create the second layer: the convolution layer
    conv1 = L.Convolution(data,
                          kernel_size=5,
                          stride=1,
                          num_output=16,
                          pad=2,
                          weight_filler=dict(type='xavier'))
    # Create the activation layer
    relu1 = L.ReLU(conv1, in_place=True)
    # Create the pooling layer
    pool1 = L.Pooling(relu1, pool=P.Pooling.MAX, kernel_size=3, stride=2)
    conv2 = L.Convolution(pool1,
                          kernel_size=3,
                          stride=1,
                          num_output=32,
                          pad=1,
                          weight_filler=dict(type='xavier'))
    relu2 = L.ReLU(conv2, in_place=True)
    pool2 = L.Pooling(relu2, pool=P.Pooling.MAX, kernel_size=3, stride=2)
    # Create a fully connected layer
    fc3 = L.InnerProduct(pool2,
                         num_output=1024,
                         weight_filler=dict(type='xavier'))
    relu3 = L.ReLU(fc3, in_place=True)
    # Create a dropout layer
    drop3 = L.Dropout(relu3, in_place=True)
    fc4 = L.InnerProduct(drop3,
                         num_output=10,
                         weight_filler=dict(type='xavier'))
    # Create a softmax loss layer
    loss = L.SoftmaxWithLoss(fc4, label)

    if include_acc:  # the accuracy layer is not needed during training, but it is needed for validation
        acc = L.Accuracy(fc4, label)
        return to_proto(loss, acc)
    else:
        return to_proto(loss)
Example #30
def resnetx(depth=[3, 4, 6, 3], width=[32, 64, 128, 256]):
    #data, label = L.Data(source=data_file, backend=P.Data.LMDB, batch_size=batch_size, ntop=2,
    #          transform_param=dict(crop_size=32,mirror=True, mean_file="/home/bob/caffe-master/examples/cifar10/mean.binaryproto"))
    data, label = L.Data(
        source="/home/bob/caffe-master/examples/cifar10/cifar10_train_lmdb",
        backend=P.Data.LMDB,
        batch_size=64,
        ntop=2,
        transform_param=dict(
            crop_size=32,
            mirror=True,
            mean_file="/home/bob/caffe-master/examples/cifar10/mean.binaryproto"
        ))
    nchannels = 32
    model = L.Convolution(data,
                          kernel_size=3,
                          stride=1,
                          num_output=nchannels,
                          pad=1,
                          bias_term=False,
                          weight_filler=dict(type='msra'),
                          bias_filler=dict(type='constant'))
    model = L.BatchNorm(model,
                        in_place=False,
                        batch_norm_param=dict(use_global_stats=True))
    # assign the Scale layer back to `model`; as a dangling `scale` variable it
    # would not be reachable from the loss and to_proto() would drop it
    model = L.Scale(model, bias_term=True, in_place=True)

    num_input = nchannels
    strides = [1] + [1] * (depth[0] - 1)
    for stride in strides:
        model = add_layer(model, num_input, width[0], stride)
        num_input = width[0] * 4

    strides = [2] + [1] * (depth[1] - 1)
    for stride in strides:
        model = add_layer(model, num_input, width[1], stride)
        num_input = width[1] * 4

    strides = [2] + [1] * (depth[2] - 1)
    for stride in strides:
        model = add_layer(model, num_input, width[2], stride)
        num_input = width[2] * 4

    strides = [2] + [1] * (depth[3] - 1)
    for stride in strides:
        model = add_layer(model, num_input, width[3], stride)
        num_input = width[3] * 4

    model = L.Pooling(model, pool=P.Pooling.AVE, global_pooling=True)
    model = L.InnerProduct(model,
                           num_output=10,
                           bias_term=True,
                           weight_filler=dict(type='xavier'),
                           bias_filler=dict(type='constant'))
    loss = L.SoftmaxWithLoss(model, label)  # the label bottom was missing in the original
    #accuracy = L.Accuracy(model, label)
    return to_proto(loss)
Example #31
 def test_caffe_import(self):
     top = L.SigmoidCrossEntropyLoss()
     with open(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'), 'w') as f:
         f.write(str(to_proto(top)))
     sample_file = open(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'), 'r')
     response = self.client.post(reverse('caffe-import'), {'file': sample_file})
     response = json.loads(response.content)
     os.remove(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'))
     self.assertEqual(response['result'], 'success')
Example #32
    def caffenet(self, lmdb, batch_size=32, include_acc=False):
        print ("building net")

        data, label = L.Data(source=lmdb, backend=P.Data.LMDB, batch_size=batch_size, ntops=2,
                             transform_param=dict(crop_size=84, mirror=True))
        
        # the net itself
        conv1, relu1 = self.conv_relu(data, 8, 32, stride=4)
        conv2, relu2 = self.conv_relu(relu1, 4, 16, stride=2)
        ip, relu3 = self.ip_relu(relu2, 256)        
        ip2 = L.InnerProduct(relu3, num_output=64)
        loss = L.SoftmaxWithLoss(ip2, label)

        if include_acc:
            acc = L.Accuracy(ip2, label)
            return to_proto(loss, acc)
        else:
            return to_proto(loss)
Example #33
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--prototxt', choices=['net', 'deploy'], default='deploy', type=str)
    parser.add_argument('--sphereH', type=int, default=320)
    parser.add_argument('--ks', type=int, default=640)
    parser.add_argument('layer', choices=top_down.keys(), type=str)
    args = parser.parse_args()

    layer = args.layer
    layers = []
    while layer != "1_1":
        layers.append(layer)
        layer = top_down[layer]
    layers.reverse()

    if args.prototxt == "deploy":
        template = (
            "input: \"data\"\n"
            "input_shape {{\n"
            "  dim: 1\n"
            "  dim: 64\n"
            "  dim: {0}\n"
            "  dim: {1}\n"
            "}}\n"
        )
        sphereW = args.sphereH * 2
        proto_str = template.format(args.sphereH, sphereW)
        plane = None
        deploy = True
    elif args.prototxt == "net":
        plane, label = Data(args.layer, phase=0, sphereH=args.sphereH, ks=args.ks)
        proto_str = str(to_proto(plane))
        plane, label = Data(args.layer, phase=1, sphereH=args.sphereH, ks=args.ks)
        deploy = False
    else:
        raise ValueError("Invalid prototxt.")

    for layer in layers:
        plane = layer_structure(plane, layer, sphereH=args.sphereH, ks=args.ks, deploy=deploy)

    if args.prototxt == "net":
        plane =  Loss(plane, label)
    proto_str += str(to_proto(plane))
    print proto_str
Example #34
 def test_caffe_import(self):
     top = L.LRN(local_size=5, alpha=1, beta=0.75, k=1, norm_region=1)
     with open(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'), 'w') as f:
         f.write(str(to_proto(top)))
     sample_file = open(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'), 'r')
     response = self.client.post(reverse('caffe-import'), {'file': sample_file})
     response = json.loads(response.content)
     os.remove(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'))
     self.assertGreaterEqual(len(response['net']['l0']['params']), 5)
     self.assertEqual(response['result'], 'success')
Example #35
 def test_caffe_import(self):
     top = L.MVN(normalize_variance=True, eps=1e-9, across_channels=False)
     with open(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'), 'w') as f:
         f.write(str(to_proto(top)))
     sample_file = open(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'), 'r')
     response = self.client.post(reverse('caffe-import'), {'file': sample_file})
     response = json.loads(response.content)
     os.remove(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'))
     self.assertGreaterEqual(len(response['net']['l0']['params']), 3)
     self.assertEqual(response['result'], 'success')
Example #36
 def test_caffe_import(self):
     top = L.BatchNorm(use_global_stats=True, moving_average_fraction=0.999, eps=1e-5)
     with open(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'), 'w') as f:
         f.write(str(to_proto(top)))
     sample_file = open(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'), 'r')
     response = self.client.post(reverse('caffe-import'), {'file': sample_file})
     response = json.loads(response.content)
     os.remove(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'))
     self.assertGreaterEqual(len(response['net']['l0']['params']), 3)
     self.assertEqual(response['result'], 'success')
Example #37
 def test_caffe_import(self):
     top = L.Log(base=-1.0, scale=1.0, shift=0.0)
     with open(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'), 'w') as f:
         f.write(str(to_proto(top)))
     sample_file = open(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'), 'r')
     response = self.client.post(reverse('caffe-import'), {'file': sample_file})
     response = json.loads(response.content)
     os.remove(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'))
     self.assertGreaterEqual(len(response['net']['l0']['params']), 3)
     self.assertEqual(response['result'], 'success')
Example #38
 def test_caffe_import(self):
     top = L.Bias(axis=1, num_axes=1, filler={'type': 'constant'})
     with open(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'), 'w') as f:
         f.write(str(to_proto(top)))
     sample_file = open(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'), 'r')
     response = self.client.post(reverse('caffe-import'), {'file': sample_file})
     response = json.loads(response.content)
     os.remove(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'))
     self.assertGreaterEqual(len(response['net']['l0']['params']), 3)
     self.assertEqual(response['result'], 'success')
Example #39
 def test_caffe_import(self):
     data, label = L.HDF5Data(source='/dummy/source/', batch_size=32, ntop=2, shuffle=False)
     with open(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'), 'w') as f:
         f.write(str(to_proto(data, label)))
     sample_file = open(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'), 'r')
     response = self.client.post(reverse('caffe-import'), {'file': sample_file})
     response = json.loads(response.content)
     os.remove(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'))
     self.assertGreaterEqual(len(response['net']['l0']['params']), 3)
     self.assertEqual(response['result'], 'success')
Example #40
 def test_caffe_import(self):
     top = L.SoftmaxWithLoss(softmax_param=dict(axis=1))
     with open(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'), 'w') as f:
         f.write(str(to_proto(top)))
     sample_file = open(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'), 'r')
     response = self.client.post(reverse('caffe-import'), {'file': sample_file})
     response = json.loads(response.content)
     os.remove(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'))
     self.assertGreaterEqual(len(response['net']['l0']['params']), 1)
     self.assertEqual(response['result'], 'success')
Example #41
def caffe_net(lmdb, mean_file, batch_size=24, phase=False):
    data, label = L.Data(
        source=lmdb,
        backend=P.Data.LMDB,
        batch_size=batch_size,
        ntop=2,
        transform_param=dict(crop_size=32, mean_file=mean_file, mirror=phase))
    fractal_unit = fractal_block(data, 64, phase, 4)
    fractal_unit = fractal_block(fractal_unit, 128, phase, 4)
    fractal_unit = fractal_block(fractal_unit, 256, phase, 4)
    fractal_unit = fractal_block(fractal_unit, 512, phase, 4)
    fractal_unit = fractal_block(fractal_unit, 512, phase, 4)
    fc = caffe_net_fun.full_connect(fractal_unit, 10)
    loss = L.SoftmaxWithLoss(fc, label)
    if not phase:
        acc = L.Accuracy(fc, label)
        return to_proto(loss, acc)
    else:
        return to_proto(loss)
Example #42
def caffenet(lmdb, batch_size=32, include_acc=False):
    print ("building net")

    data, label = L.Data(source=lmdb, backend=P.Data.LMDB, batch_size=batch_size, ntop=2,
                         transform_param=dict(crop_size=84, mean_value=[104, 117, 123], mirror=True))
    
    # the net itself
    conv1, relu1 = conv_relu(data, 8, 32, stride=4)
    conv2, relu2 = conv_relu(relu1, 4, 16, stride=2)
    ip1 = L.InnerProduct(relu2, num_output=256)
    relu3 = L.ReLU(ip1, in_place=True)
    ip2 = L.InnerProduct(relu3, num_output=64)
    loss = L.SoftmaxWithLoss(ip2, label)

    if include_acc:
        acc = L.Accuracy(ip2, label)
        return to_proto(loss, acc)
    else:
        return to_proto(loss)
Example #43
 def test_caffe_import(self):
     top = L.ContrastiveLoss(margin=1.0, legacy_version=False)
     with open(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'), 'w') as f:
         f.write(str(to_proto(top)))
     sample_file = open(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'), 'r')
     response = self.client.post(reverse('caffe-import'), {'file': sample_file})
     response = json.loads(response.content)
     os.remove(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'))
     self.assertGreaterEqual(len(response['net']['l0']['params']), 2)
     self.assertEqual(response['result'], 'success')
Example #44
def genLayer(image_path, batch_size, img_size, grid_size, info, indexMap):
    total_class = 0
    with open(indexMap, 'r') as f:
        for ln in f:
            total_class += 1
    data_size = 0
    with open(info, 'r') as f:
        for ln in f:
            line = ln.rstrip('\n')
            keyvalue = line.split(':')
            if keyvalue[0] == 'Number of record':
                data_size = int(keyvalue[1])
    param_str = "'image_path':'%s','batch_size':%d,'size':%d,'grid_size':%d,'total_class':%d,'total':%d,'bbox:2" % (
        image_path, batch_size, img_size, grid_size, total_class, data_size)
    dataLayer = L.Python(module='caffe.YoloDataLayer',
                         layer='YoloDataLayer',
                         param_str=param_str)
    print dataLayer
    to_proto(dataLayer)
Example #45
 def test_caffe_import(self):
     data = L.Input(shape={'dim': [10, 3, 224, 224]})
     with open(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'), 'w') as f:
         f.write(str(to_proto(data)))
     sample_file = open(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'), 'r')
     response = self.client.post(reverse('caffe-import'), {'file': sample_file})
     response = json.loads(response.content)
     os.remove(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'))
     self.assertGreaterEqual(len(response['net']['l0']['params']), 1)
     self.assertEqual(response['result'], 'success')
Example #46
 def test_caffe_import(self):
     data, label = L.MemoryData(batch_size=32, ntop=2, channels=3, height=224, width=224)
     with open(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'), 'w') as f:
         f.write(str(to_proto(data, label)))
     sample_file = open(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'), 'r')
     response = self.client.post(reverse('caffe-import'), {'file': sample_file})
     response = json.loads(response.content)
     os.remove(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'))
     self.assertGreaterEqual(len(response['net']['l0']['params']), 4)
     self.assertEqual(response['result'], 'success')
Example #47
 def test_caffe_import(self):
     top = L.Pooling(kernel_size=2, pad=0, stride=2, pool=1)  # pool=1 selects P.Pooling.AVE
     with open(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'), 'w') as f:
         f.write(str(to_proto(top)))
     sample_file = open(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'), 'r')
     response = self.client.post(reverse('caffe-import'), {'file': sample_file})
     response = json.loads(response.content)
     os.remove(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'))
     self.assertGreaterEqual(len(response['net']['l0']['params']), 4)
     self.assertEqual(response['result'], 'success')
Example #48
def normnet(train_lmdb,
            test_lmdb,
            batch_size=256,
            stages=[2, 2, 2, 2],
            input_size=32,
            first_output=32,
            include_acc=False):
    # NOTE: each assignment below overwrites `data`/`label`, and to_proto() only
    # serializes layers reachable from the tops it is given, so only the last
    # (TEST-phase MemoryData) layer actually ends up in the generated prototxt
    data, label = L.Data(source=train_lmdb,
                         backend=P.Data.LMDB,
                         batch_size=batch_size,
                         ntop=2,
                         transform_param=dict(crop_size=227,
                                              mean_value=[104, 117, 123],
                                              mirror=True),
                         include=dict(phase=getattr(caffe_pb2, 'TRAIN')))
    data, label = L.Data(source=test_lmdb,
                         backend=P.Data.LMDB,
                         batch_size=batch_size,
                         ntop=2,
                         transform_param=dict(crop_size=227,
                                              mean_value=[104, 117, 123],
                                              mirror=True),
                         include=dict(phase=getattr(caffe_pb2, 'TEST')))
    data, label = L.MemoryData(batch_size=batch_size,
                               height=input_size,
                               width=input_size,
                               channels=3,
                               ntop=2,
                               transform_param=dict(mean_value=[104, 117, 123],
                                                    mirror=True),
                               include=dict(phase=getattr(caffe_pb2, 'TEST')))

    # the net itself

    conv1 = conv_factory(data, 3, 96, 1, 1)
    in3a = SimpleFactory(conv1, 32, 32)
    in3b = SimpleFactory(in3a, 32, 48)
    in3c = DownsampleFactory(in3b, 80)
    in4a = SimpleFactory(in3c, 112, 48)
    in4b = SimpleFactory(in4a, 96, 64)
    in4c = SimpleFactory(in4b, 80, 80)
    in4d = SimpleFactory(in4c, 48, 96)
    #    for i in range(25):
    #        in4d = SimpleFactory(in4d, 48, 96)
    in4e = DownsampleFactory(in4d, 96)
    in5a = SimpleFactory(in4e, 176, 160)
    in5b = SimpleFactory(in5a, 176, 160)

    pool = avg_pool(in5b, 8)

    fc = L.InnerProduct(pool, num_output=10, weight_filler=dict(type='xavier'))
    loss = L.SoftmaxWithLoss(fc, label)
    acc = L.Accuracy(fc, label, include=dict(phase=getattr(caffe_pb2, 'TEST')))
    return to_proto(loss, acc)
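
Because only the last data layer survives (see the NOTE above), one workaround is to build and serialize each phase separately, so that every Data layer is reachable from the tops handed to to_proto(). A sketch under that assumption, with a stand-in net body:

def write_phase_net(fname, lmdb, phase_name, batch_size=256):
    data, label = L.Data(source=lmdb, backend=P.Data.LMDB,
                         batch_size=batch_size, ntop=2,
                         transform_param=dict(crop_size=227,
                                              mean_value=[104, 117, 123],
                                              mirror=(phase_name == 'TRAIN')),
                         include=dict(phase=getattr(caffe_pb2, phase_name)))
    fc = L.InnerProduct(data, num_output=10)  # stand-in for the real net body
    loss = L.SoftmaxWithLoss(fc, label)
    with open(fname, 'w') as f:
        f.write(str(to_proto(loss)))

write_phase_net('train.prototxt', 'train_lmdb', 'TRAIN')
write_phase_net('test.prototxt', 'test_lmdb', 'TEST')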
Example #49
 def test_caffe_import(self):
     top = L.Embed(num_output=128, input_dim=2, bias_term=False,
                   weight_filler={'type': 'xavier'})
     with open(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'), 'w') as f:
         f.write(str(to_proto(top)))
     sample_file = open(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'), 'r')
     response = self.client.post(reverse('caffe-import'), {'file': sample_file})
     response = json.loads(response.content)
     os.remove(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'))
     self.assertGreaterEqual(len(response['net']['l0']['params']), 4)
     self.assertEqual(response['result'], 'success')
Example #50
def ResNext(split, batch_size):
    DATAPATH_PREFIX = "../CIFAR10/"
    TRAIN_FILE = DATAPATH_PREFIX + "cifar10_train_lmdb"
    TEST_FILE  = DATAPATH_PREFIX + "cifar10_test_lmdb"
    MEAN_FILE  = DATAPATH_PREFIX + "mean.binaryproto"

    if split == "train":
        data, labels = L.Data(source = TRAIN_FILE, backend = P.Data.LMDB,
                              batch_size = batch_size, ntop = 2,
                              transform_param = dict(mean_file = MEAN_FILE, 
                                                     crop_size = 28, 
                                                     mirror = True))
    elif split == "test":
        data, labels = L.Data(source = TEST_FILE, backend = P.Data.LMDB,
                              batch_size = batch_size, ntop = 2,
                              transform_param = dict(mean_file = MEAN_FILE,
                                                     crop_size = 28))
    # When projection_stride == 1, input and output dims match and can be added directly.
    # When projection_stride == 2, they differ, so a 1x1 conv with stride 2 projects the input.
    # Output size: O = (W - kernel_size + 2 * padding) / stride + 1
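    # Worked example (Caffe convolution output uses floor division): a 3x3 conv
    # with stride 2 and pad 1 on a 32x32 map gives O = (32 - 3 + 2*1) // 2 + 1 = 16,
    # which is how the stride-2 projection halves the feature map between stages.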
    repeat = 3
    # Conv1
    scale, result = conv_BN_scale_relu(split, data, nout = 16, ks = 3, stride = 1, pad = 1)
    # Conv2_X: input and output are both 16 channels at 32x32, so shortcuts add directly
    for ii in range(repeat):
        projection_stride = 1
        result = ResNet_block(split, result, nout = 16, ks = 3, stride = 1,
                                projection_stride = projection_stride, pad = 1)
    # Conv3_X
    for ii in range(repeat):
        # Only at the transition from conv2_X (16x16) to conv3_X (8x8) do the
        # data dimensions differ; the shortcut must be projected to the new
        # dimensions, and the projection convolution uses stride 2
        if ii == 0:
            projection_stride = 2
        else:
            projection_stride = 1
        result = ResNet_block(split, result, nout = 32, ks = 3, stride = 1,
                                projection_stride = projection_stride, pad = 1)
    # Conv4_X
    for ii in range(repeat):
        if ii == 0:
            projection_stride = 2
        else:
            projection_stride = 1
        result = ResNet_block(split, result, nout = 64, ks = 3, stride = 1,
                            projection_stride = projection_stride, pad = 1)
    pool = L.Pooling(result, pool = P.Pooling.AVE, global_pooling = True)
    IP = L.InnerProduct(pool, num_output = 10, 
                            weight_filler = dict(type='xavier'),
                            bias_filler = dict(type='constant'))
    acc = L.Accuracy(IP, labels)
    loss = L.SoftmaxWithLoss(IP, labels)
    
    return to_proto(acc, loss)
Example #51
def creat_net(lmdb, batch_size, include_acc=False):
    # NOTE: mean_file is assumed to be defined at module scope
    data, label = L.Data(source=lmdb, backend=P.Data.LMDB, batch_size=batch_size, ntop=2,
                         transform_param=dict(crop_size=40, mean_file=mean_file, mirror=True))
    conv1 = L.Convolution(data, kernel_size=5, stride=1, num_output=16, pad=2, weight_filler=dict(type='xavier'))
    relu1 = L.ReLU(conv1, in_place=True)
    pool1 = L.Pooling(relu1, pool=P.Pooling.MAX, kernel_size=3, stride=2)
    conv2 = L.Convolution(pool1, kernel_size=3, stride=1, num_output=32, pad=1, weight_filler=dict(type='xavier'))
    relu2 = L.ReLU(conv2, in_place=True)
    pool2 = L.Pooling(relu2, pool=P.Pooling.MAX, kernel_size=3, stride=2)
    fc3 = L.InnerProduct(pool2, num_output=1024, weight_filler=dict(type='xavier'))
    relu3 = L.ReLU(fc3, in_place=True)
    drop3 = L.Dropout(relu3, in_place=True)
    fc4 = L.InnerProduct(drop3, num_output=10, weight_filler=dict(type='xavier'))
    loss = L.SoftmaxWithLoss(fc4, label)

    if include_acc:
        acc = L.Accuracy(fc4, label)
        return to_proto(loss, acc)
    else:
        return to_proto(loss)
Example #52
File: lenet.py  Project: saicoco/_practice
def lenet(lmdb, batch_size=128):

    data, labels = L.Data(source=lmdb, backend=P.Data.LMDB, batch_size=batch_size, ntop=2)
    conv1, relu1 = conv(data, kernel_size=5, num_output=20)
    pool1 = pool(relu1)
    conv2, relu2 = conv(pool1, kernel_size=5, num_output=50)
    pool2 = pool(relu2)
    fc1, relu3 = FC(pool2, num_output=500)
    loss = L.SoftmaxWithLoss(fc1, labels)
    acc = L.Accuracy(fc1, labels)
    return to_proto(loss, acc)
Example #53
 def test_caffe_import(self):
     top = L.Deconvolution(convolution_param=dict(kernel_size=3, pad=1, stride=1, num_output=128,
                           weight_filler={'type': 'xavier'}, bias_filler={'type': 'constant'}))
     with open(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'), 'w') as f:
         f.write(str(to_proto(top)))
     sample_file = open(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'), 'r')
     response = self.client.post(reverse('caffe-import'), {'file': sample_file})
     response = json.loads(response.content)
     os.remove(os.path.join(settings.BASE_DIR, 'media', 'test.prototxt'))
     self.assertGreaterEqual(len(response['net']['l0']['params']), 6)
     self.assertEqual(response['result'], 'success')
Example #54
def resnet(lmdb,
           batch_size=64,
           stages=[2, 2, 2, 2],
           first_output=64,
           deploy=False):
    # Producing the deploy prototxt is tricky because its data input does not
    # come from a layer, so we need a workaround; the training and testing
    # prototxt files are straightforward to produce.
    #n = caffe.NetSpec()
    if not deploy:
        data, label = L.Data(source=lmdb,
                             backend=P.Data.LMDB,
                             batch_size=batch_size,
                             ntop=2,
                             transform_param=dict(crop_size=224,
                                                  mean_value=[104, 117, 123],
                                                  mirror=True))
    # produce the data definition for the deploy net
    else:
        input = "data"
        dim1 = 1
        dim2 = 1
        dim3 = 224
        dim4 = 224
        # Make an empty "data" layer so the next layer has an input blob to
        # consume; this placeholder is later removed from the serialization
        # string, since it only stands in for the real input declaration
        data = L.Layer()

    relu1 = conv_factory(data, 7, first_output, stride=2, pad=3)
    residual = max_pool(relu1, 3, stride=2)

    k = 0
    for i in stages:
        #    first_output *= 2
        for j in range(i):
            if j == 0:
                if k == 0:
                    residual = residual_factory_proj(residual, first_output, 1)
                    k += 1
                else:
                    residual = residual_factory_proj(residual, first_output, 2)
            else:
                residual = residual_factory1(residual, first_output)
        first_output *= 2

    glb_pool = L.Pooling(residual, pool=P.Pooling.AVE, global_pooling=True)
    fc = L.InnerProduct(glb_pool,
                        num_output=1000,
                        weight_filler=dict(type='xavier'))
    acc = L.Accuracy(fc, label, include=dict(phase=getattr(caffe_pb2, 'TEST')))

    # The loss layer exists only in the training/testing nets, not in the deploy
    # net; the deploy case is finished by post-processing the serialized string.
    if not deploy:
        loss = L.SoftmaxWithLoss(fc, label)
        return to_proto(loss, acc)
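
The placeholder emitted by L.Layer() then has to be stripped from the serialized string and replaced with a plain input declaration. A rough sketch of that post-processing step (the regex and dims are assumptions; it presumes the placeholder is the first layer block and contains no nested braces):

import re

def deploy_prototxt(proto_str, dims=(1, 1, 224, 224)):
    # drop the first (placeholder) layer block from the serialized net
    body = re.sub(r'layer \{.*?\}\n', '', proto_str, count=1, flags=re.S)
    # prepend the input declaration the placeholder stood in for
    header = 'input: "data"\n' + ''.join('input_dim: %d\n' % d for d in dims)
    return header + body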
Example #55
def Lenet(img_list, batch_size, include_acc=False):
    # Layer 1: data input layer, reading images via ImageData
    data, label = L.ImageData(source=img_list,
                              batch_size=batch_size,
                              ntop=2,
                              root_folder=root,
                              transform_param=dict(scale=0.00390625))  # 1/256: scales pixels into [0, 1)
    # Layer 2: convolution
    conv1, relu1 = conv_relu(data, 11, 96, stride=4, pad=0)
    # pooling
    pool1 = max_pool(relu1, 3, stride=2)
    norm1 = L.LRN(pool1, local_size=5, alpha=1e-4, beta=0.75)

    # convolution
    conv2, relu2 = conv_relu(norm1, 5, 256, stride=4, pad=2, group=2)
    # pooling
    pool2 = max_pool(relu2, 3, stride=2)
    norm2 = L.LRN(pool2, local_size=5, alpha=1e-4, beta=0.75)

    conv3, relu3 = conv_relu(norm2, 3, 384, pad=1)
    conv4, relu4 = conv_relu(relu3, 3, 384, pad=1, group=2)
    conv5, relu5 = conv_relu(relu4, 3, 256, pad=1, group=2)

    pool5 = max_pool(relu5, 3, stride=2)
    fc6, relu6 = fc_relu(pool5, 4096)
    drop6 = L.Dropout(relu6, in_place=True)
    fc7, relu7 = fc_relu(drop6, 4096)
    drop7 = L.Dropout(relu7, in_place=True)

    # fully connected layer
    fc8 = L.InnerProduct(drop7,
                         num_output=10,
                         weight_filler=dict(type='xavier'))
    # softmax loss layer
    loss = L.SoftmaxWithLoss(fc8, label)

    if include_acc:  # the test phase needs an Accuracy layer
        acc = L.Accuracy(fc8, label)
        return to_proto(loss, acc)
    else:
        return to_proto(loss)
Example #56
def create_net(lmdb, batch_size, include_acc=False):

    data, label = L.Data(source=lmdb,
                         name='mnist',
                         backend=P.Data.LMDB,
                         batch_size=batch_size,
                         ntop=2,
                         transform_param=dict(scale=0.00390625))

    conv1 = L.Convolution(data,
                          kernel_size=5,
                          stride=1,
                          num_output=20,
                          pad=0,
                          weight_filler=dict(type='xavier'))
    pool1 = L.Pooling(conv1, pool=P.Pooling.MAX, kernel_size=2, stride=2)

    conv2 = L.Convolution(pool1,
                          kernel_size=5,
                          stride=1,
                          num_output=50,
                          pad=0,
                          weight_filler=dict(type='xavier'))
    pool2 = L.Pooling(conv2, pool=P.Pooling.MAX, kernel_size=2, stride=2)

    fc3 = L.InnerProduct(pool2,
                         num_output=500,
                         weight_filler=dict(type='xavier'))
    relu3 = L.ReLU(fc3, in_place=True)

    fc4 = L.InnerProduct(relu3,
                         num_output=10,
                         weight_filler=dict(type='xavier'))

    loss = L.SoftmaxWithLoss(fc4, label)

    if include_acc:
        acc = L.Accuracy(fc4, label)
        return to_proto(loss, acc)
    else:
        return to_proto(loss)
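
For the standard 28x28 MNIST input the shapes work out as: conv1 (5x5, stride 1, no pad) -> 24x24x20; pool1 (2x2, stride 2) -> 12x12x20; conv2 -> 8x8x50; pool2 -> 4x4x50, i.e. 800 values feeding fc3's 500 outputs.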
Example #57
    def make_net(self, mode):
        bs = self.solverfig['batch_size'] // self.solverfig['gpunum'] + 1  # approximate per-GPU batch size
        if mode == 'deploy':
            data = L.Input(name='data',
                           ntop=1,
                           shape=dict(dim=self.datafig['imgsize']))
        elif mode == 'train':
            data, label = L.Data(name='data',
                                 source=self.datafig['train_file'],
                                 backend=P.Data.LMDB,
                                 batch_size=bs,
                                 ntop=2,
                                 transform_param=dict(mirror=True,
                                                      crop_size=32,
                                                      mean_value=[0, 0, 0],
                                                      scale=1))
        else:
            data, label = L.Data(name='data',
                                 source=self.datafig['test_file'],
                                 backend=P.Data.LMDB,
                                 batch_size=bs,
                                 ntop=2,
                                 transform_param=dict(mirror=True,
                                                      crop_size=32,
                                                      mean_value=[0, 0, 0],
                                                      scale=1))

        scale, relu = conv_BN_scale_relu(data, 64, 3, 1, 1, bias=False)
        stage_1 = self.block(relu, self.stages[0], self.stages[1], 1)
        stage_2 = self.block(stage_1, self.stages[1], self.stages[2], 2)
        stage_3 = self.block(stage_2, self.stages[2], self.stages[3], 2)
        fc = classfication_layer(stage_3, self.datafig['nlabels'])

        if mode == 'deploy':
            prob = L.Softmax(fc, name='prob')
            return to_proto(prob)
        else:
            loss = L.SoftmaxWithLoss(fc, label)
            acc = L.Accuracy(fc, label)
            return to_proto(loss, acc)
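
A typical driver writes one prototxt per mode. A sketch, assuming `net` is an instance of the class that defines make_net and the output paths are arbitrary:

for mode in ('train', 'test', 'deploy'):
    with open('resnet_%s.prototxt' % mode, 'w') as f:
        f.write(str(net.make_net(mode)))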