Example 1
def create_Net(lmdb, batch_size, C1, C2, C3):
    n = caffe.NetSpec()
    n.data, n.label = L.Data(batch_size=batch_size, backend=P.Data.LMDB, source=lmdb,
                             transform_param=dict(scale=1./255), ntop=2)
    n.conv1 = L.Convolution(n.data, kernel_size=3, num_output=C1, weight_filler=dict(type='xavier'))
    n.tanh1 = L.TanH(n.conv1, in_place=True)
    n.pool1 = L.Pooling(n.tanh1, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    n.conv2 = L.Convolution(n.pool1, kernel_size=3, num_output=C2, weight_filler=dict(type='xavier'))
    n.tanh2 = L.TanH(n.conv2, in_place=True)
    n.pool2 = L.Pooling(n.tanh2, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    n.conv3 = L.Convolution(n.pool2, kernel_size=3, num_output=C3, weight_filler=dict(type='xavier'))
    n.tanh3 = L.TanH(n.conv3, in_place=True)
    n.fc = L.InnerProduct(n.tanh3, num_output=10, weight_filler=dict(type='xavier'))
    n.tanh4 = L.TanH(n.fc, in_place=True)
    n.cla = L.SoftmaxWithLoss(n.fc, n.label)
    n.acc = L.Accuracy(n.fc, n.label)
    return n.to_proto()
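All of these generators return a NetParameter protobuf; serializing it with str() yields a standard prototxt that a solver or caffe.Net can load. A minimal usage sketch for the function above, assuming the imports every example here relies on and a placeholder LMDB path:

import caffe
from caffe import layers as L, params as P  # imports assumed throughout these examples

# 'mnist_train_lmdb' and the filter counts are placeholders; adjust to your data.
with open('train.prototxt', 'w') as f:
    f.write(str(create_Net('mnist_train_lmdb', 64, 20, 50, 100)))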
Example 2
def anon_lenet(batch_size):
    data, label = L.DummyData(shape=[dict(dim=[batch_size, 1, 28, 28]),
                                     dict(dim=[batch_size, 1, 1, 1])],
                              transform_param=dict(scale=1./255), ntop=2)
    conv1 = L.Convolution(data, kernel_size=5, num_output=20,
        weight_filler=dict(type='xavier'))
    pool1 = L.Pooling(conv1, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    conv2 = L.Convolution(pool1, kernel_size=5, num_output=50,
        weight_filler=dict(type='xavier'))
    pool2 = L.Pooling(conv2, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    ip1 = L.InnerProduct(pool2, num_output=500,
        weight_filler=dict(type='xavier'))
    relu1 = L.ReLU(ip1, in_place=True)
    ip2 = L.InnerProduct(relu1, num_output=10,
        weight_filler=dict(type='xavier'))
    loss = L.SoftmaxWithLoss(ip2, label)
    return loss.to_proto()
Example 3
    def lenet_bn_proto(self, batch_size, phase='TRAIN'):
        n = caffe.NetSpec()
        if phase == 'TRAIN':
            source_data = self.train_data
            mirror = False
        else:
            source_data = self.test_data
            mirror = False
        n.data, n.label = L.Data(source=source_data,
                                 backend=P.Data.LMDB,
                                 batch_size=batch_size,
                                 ntop=2,
                                 transform_param=dict(scale=0.00390625,
                                                      mirror=mirror))

        n.conv1 = L.Convolution(n.data,
                                kernel_size=5,
                                num_output=20,
                                stride=1,
                                weight_filler=dict(type='xavier'),
                                bias_filler=dict(type='constant'))
        n.bn1 = L.BatchNorm(n.conv1, use_global_stats=False)
        n.pool1 = L.Pooling(n.bn1, pool=P.Pooling.MAX, kernel_size=2, stride=2)
        n.conv2 = L.Convolution(n.pool1,
                                kernel_size=5,
                                num_output=50,
                                stride=1,
                                weight_filler=dict(type='xavier'),
                                bias_filler=dict(type='constant'))
        n.bn2 = L.BatchNorm(n.conv2, use_global_stats=False)
        n.pool2 = L.Pooling(n.bn2, pool=P.Pooling.MAX, kernel_size=2, stride=2)
        n.ip1 = L.InnerProduct(n.pool2,
                               num_output=500,
                               weight_filler=dict(type='xavier'),
                               bias_filler=dict(type='constant'))
        n.relu1 = L.ReLU(n.ip1, in_place=True)
        n.ip2 = L.InnerProduct(n.relu1,
                               num_output=self.classifier_num,
                               weight_filler=dict(type='xavier'),
                               bias_filler=dict(type='constant'))
        if phase != 'TRAIN':
            n.accuracy = L.Accuracy(n.ip2, n.label, include=dict(phase=1))
        n.loss = L.SoftmaxWithLoss(n.ip2, n.label)
        return n.to_proto()
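A Caffe-specific caveat for this net: the BatchNorm layer only normalizes, with no learned scale or shift, so it is conventionally followed by a Scale layer with bias_term=True (as the densenet and logreg examples below do). A minimal sketch of that pairing, with illustrative names:

import caffe
from caffe import layers as L

def bn_scale(bottom):
    # BatchNorm whitens; Scale supplies the learned gamma/beta that Caffe's
    # BatchNorm layer itself does not have.
    bn = L.BatchNorm(bottom, use_global_stats=False)
    scale = L.Scale(bn, bias_term=True, in_place=True)
    return bn, scale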
Example 4
def Lenet(img_list, batch_size, include_acc=False):
    # Layer 1: data input layer. The commented-out ImageData variant below reads
    # images straight from disk instead of an LMDB:
    #    data, label = L.ImageData(source=img_list, batch_size=batch_size, ntop=2,root_folder=root,
    #        transform_param=dict(scale= 0.00390625))
    data, label = L.Data(
        batch_size=batch_size,
        backend=P.Data.LMDB,
        source=img_list,
        transform_param=dict(scale=1. / 255),  # pixel scaling factor
        ntop=2)
    # Layer 2: convolution layer
    conv1 = L.Convolution(data,
                          kernel_size=5,
                          stride=1,
                          num_output=20,
                          pad=0,
                          weight_filler=dict(type='xavier'))
    # Pooling layer
    pool1 = L.Pooling(conv1, pool=P.Pooling.MAX, kernel_size=2, stride=2)
    # Convolution layer
    conv2 = L.Convolution(pool1,
                          kernel_size=5,
                          stride=1,
                          num_output=50,
                          pad=0,
                          weight_filler=dict(type='xavier'))
    # Pooling layer
    pool2 = L.Pooling(conv2, pool=P.Pooling.MAX, kernel_size=2, stride=2)
    # Fully connected layer
    fc3 = L.InnerProduct(pool2,
                         num_output=500,
                         weight_filler=dict(type='xavier'))
    # Activation layer
    relu3 = L.ReLU(fc3, in_place=True)
    # Fully connected layer
    fc4 = L.InnerProduct(relu3,
                         num_output=10,
                         weight_filler=dict(type='xavier'))
    # Softmax loss layer
    loss = L.SoftmaxWithLoss(fc4, label)

    if include_acc:  # the test phase needs an Accuracy layer
        acc = L.Accuracy(fc4, label)
        return to_proto(loss, acc)
    else:
        return to_proto(loss)
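Since the include_acc flag switches the Accuracy layer on, the same generator can emit both phases. A usage sketch with placeholder LMDB paths:

import caffe
from caffe import layers as L, params as P, to_proto  # to_proto must be in scope where Lenet is defined

with open('lenet_train.prototxt', 'w') as f:
    f.write(str(Lenet('train_lmdb', batch_size=64)))
with open('lenet_test.prototxt', 'w') as f:
    f.write(str(Lenet('test_lmdb', batch_size=100, include_acc=True)))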
Example 5
def lenet(lmdb, batch_size):
    # our version of LeNet: a series of linear and simple nonlinear transformations
    n = caffe.NetSpec()
    
    n.data, n.label = L.Data(batch_size=batch_size, backend=P.Data.LMDB, source=lmdb,
                             transform_param=dict(scale=1./255), ntop=2)
    
    n.conv1 = L.Convolution(n.data, kernel_size=5, num_output=20, weight_filler=dict(type='xavier'))
    n.pool1 = L.Pooling(n.conv1, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    n.conv2 = L.Convolution(n.pool1, kernel_size=5, num_output=50, weight_filler=dict(type='xavier'))
    n.pool2 = L.Pooling(n.conv2, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    n.fc1 =   L.InnerProduct(n.pool2, num_output=500, weight_filler=dict(type='xavier'))
    n.relu1 = L.ReLU(n.fc1, in_place=True)
    n.score = L.InnerProduct(n.relu1, num_output=10, weight_filler=dict(type='xavier'))
    n.loss =  L.SoftmaxWithLoss(n.score, n.label)
    
    return n.to_proto()
Example 6
def densenet(data_file, architecture='densenet_121', mode='train', batch_size=64, no_of_classes=3, dropout=0.2):

    architecture_map = {'densenet_121': [6, 12, 24, 16], 'densenet_169': [6, 12, 32, 32],
                        'densenet_201': [6, 12, 48, 32], 'densenet_264': [6, 12, 64, 48]}

    no_dense_blocks_ar = architecture_map[architecture]

    data, label = L.Data(source=data_file, backend=P.Data.LMDB, batch_size=batch_size, ntop=2, 
              transform_param=dict(mean_file="/home/achyut/Desktop/classifier_sorted_rgb/lmdb/mean.binaryproto"))

    nchannels = 112
    model = L.Convolution(data, kernel_size=7, stride=2, num_output=nchannels,
                        pad=1, bias_term=False, weight_filler=dict(type='msra'), bias_filler=dict(type='constant'))
    model = L.Pooling(model, pool=P.Pooling.MAX, kernel_size=3, stride=2)

    # width handed to the dense blocks: half the stem width, halved again
    # after each transition
    nchannels //= 2

    for i in range(no_dense_blocks_ar[0]):
        model = dense_block(model, nchannels, dropout)
    model = transition_block(model, nchannels, dropout)
    nchannels //= 2

    for i in range(no_dense_blocks_ar[1]):
        model = dense_block(model, nchannels, dropout)
    model = transition_block(model, nchannels, dropout)
    nchannels //= 2

    for i in range(no_dense_blocks_ar[2]):
        model = dense_block(model, nchannels, dropout)
    model = transition_block(model, nchannels, dropout)
    nchannels //= 2

    for i in range(no_dense_blocks_ar[3]):
        model = dense_block(model, nchannels, dropout)
    model = transition_block(model, nchannels, dropout)
    nchannels //= 2


    model = L.BatchNorm(model, in_place=False, param=[dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0)])
    model = L.Scale(model, bias_term=True, in_place=True, filler=dict(value=1), bias_filler=dict(value=0))
    model = L.ReLU(model, in_place=True)
    model = L.Pooling(model, pool=P.Pooling.AVE, global_pooling=True)
    model = L.InnerProduct(model, num_output=no_of_classes, bias_term=True, weight_filler=dict(type='xavier'), bias_filler=dict(type='constant'))
    loss = L.SoftmaxWithLoss(model, label)
    accuracy = L.Accuracy(model, label)
    return to_proto(loss, accuracy)
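This generator leans on dense_block and transition_block helpers that are not shown. A minimal sketch of what they typically look like in Caffe DenseNet implementations (BN -> Scale -> ReLU -> Conv, with a Concat for the dense connection); treat the exact hyperparameters as assumptions:

import caffe
from caffe import layers as L, params as P

def bn_scale_relu_conv(bottom, ks, nout, pad=0, dropout=0.2):
    bn = L.BatchNorm(bottom, in_place=False)
    scale = L.Scale(bn, bias_term=True, in_place=True)
    relu = L.ReLU(scale, in_place=True)
    conv = L.Convolution(relu, kernel_size=ks, num_output=nout, pad=pad,
                         bias_term=False, weight_filler=dict(type='msra'))
    if dropout > 0:
        conv = L.Dropout(conv, dropout_ratio=dropout)
    return conv

def dense_block(bottom, growth_rate, dropout):
    # one dense layer: BN-ReLU-Conv3x3, concatenated with its own input
    conv = bn_scale_relu_conv(bottom, ks=3, nout=growth_rate, pad=1, dropout=dropout)
    return L.Concat(bottom, conv, axis=1)

def transition_block(bottom, nout, dropout):
    # 1x1 conv followed by 2x2 average pooling
    conv = bn_scale_relu_conv(bottom, ks=1, nout=nout, dropout=dropout)
    return L.Pooling(conv, pool=P.Pooling.AVE, kernel_size=2, stride=2)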
Example 7
def resnet(train_lmdb,
           test_lmdb,
           batch_size=256,
           stages=[2, 2, 2, 2],
           first_output=32,
           include_acc=False):
    # NOTE: the second L.Data assignment below overwrites the first, so only the
    # TEST-phase data layer actually appears in the generated proto.
    data, label = L.Data(source=train_lmdb,
                         backend=P.Data.LMDB,
                         batch_size=batch_size,
                         ntop=2,
                         transform_param=dict(crop_size=227,
                                              mean_value=[104, 117, 123],
                                              mirror=True),
                         include=dict(phase=getattr(caffe_pb2, 'TRAIN')))
    data, label = L.Data(source=test_lmdb,
                         backend=P.Data.LMDB,
                         batch_size=batch_size,
                         ntop=2,
                         transform_param=dict(crop_size=227,
                                              mean_value=[104, 117, 123],
                                              mirror=True),
                         include=dict(phase=getattr(caffe_pb2, 'TEST')))

    # the net itself
    relu1 = conv_factory_relu(data, 3, first_output, stride=1, pad=1)
    relu2 = conv_factory_relu(relu1, 3, first_output, stride=1, pad=1)
    residual = max_pool(relu2, 3, stride=2)

    for i in stages[1:]:
        first_output *= 2
        for j in range(i):
            if j == 0:
                # first unit of each stage: projection shortcut; note that the
                # i == 0 branch never fires for the stage sizes given above
                if i == 0:
                    residual = residual_factory_proj(residual, first_output, 1)
                else:
                    residual = residual_factory_proj(residual, first_output, 2)
            else:
                residual = residual_factory1(residual, first_output)

    glb_pool = L.Pooling(residual, pool=P.Pooling.AVE, global_pooling=True)
    fc = L.InnerProduct(glb_pool, num_output=1000)
    loss = L.SoftmaxWithLoss(fc, label)
    acc = L.Accuracy(fc, label, include=dict(phase=getattr(caffe_pb2, 'TEST')))
    return to_proto(loss, acc)
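The helpers conv_factory_relu, max_pool, and the residual unit factories are not shown. A sketch consistent with how they are called above (the projection variant residual_factory_proj would be analogous, with a strided 1x1 conv on the shortcut); all details are assumptions:

import caffe
from caffe import layers as L, params as P

def conv_factory_relu(bottom, ks, nout, stride=1, pad=0):
    # Conv -> BatchNorm -> Scale -> ReLU, a common Caffe idiom
    conv = L.Convolution(bottom, kernel_size=ks, stride=stride, num_output=nout,
                         pad=pad, bias_term=False, weight_filler=dict(type='msra'))
    bn = L.BatchNorm(conv, in_place=True)
    scale = L.Scale(bn, bias_term=True, in_place=True)
    return L.ReLU(scale, in_place=True)

def max_pool(bottom, ks, stride=1):
    return L.Pooling(bottom, pool=P.Pooling.MAX, kernel_size=ks, stride=stride)

def residual_factory1(bottom, nout):
    # identity-shortcut residual unit: two 3x3 convs summed with the input
    branch = conv_factory_relu(bottom, 3, nout, stride=1, pad=1)
    branch = L.Convolution(branch, kernel_size=3, stride=1, num_output=nout,
                           pad=1, bias_term=False, weight_filler=dict(type='msra'))
    return L.ReLU(L.Eltwise(bottom, branch, operation=P.Eltwise.SUM), in_place=True)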
Example 8
def caffenet(data,
             label=None,
             train=True,
             num_classes=1000,
             classifier_name='fc8',
             learn_all=False):
    n = caffe.NetSpec()
    n.data = data
    param = learned_param if learn_all else frozen_param
    n.conv1, n.relu1 = conv_relu(n.data, 11, 96, stride=4, param=param)
    n.pool1 = max_pool(n.relu1, 3, stride=2)
    n.norm1 = L.LRN(n.pool1, local_size=5, alpha=1e-4, beta=0.75)
    n.conv2, n.relu2 = conv_relu(n.norm1, 5, 256, pad=2, group=2, param=param)
    n.pool2 = max_pool(n.relu2, 3, stride=2)
    n.norm2 = L.LRN(n.pool2, local_size=5, alpha=1e-4, beta=0.75)
    n.conv3, n.relu3 = conv_relu(n.norm2, 3, 384, pad=1, param=param)
    n.conv4, n.relu4 = conv_relu(n.relu3, 3, 384, pad=1, group=2, param=param)
    n.conv5, n.relu5 = conv_relu(n.relu4, 3, 256, pad=1, group=2, param=param)
    n.pool5 = max_pool(n.relu5, 3, stride=2)
    n.fc6, n.relu6 = fc_relu(n.pool5, 4096, param=param)
    if train:
        n.drop6 = fc7input = L.Dropout(n.relu6, in_place=True)
    else:
        fc7input = n.relu6
    n.fc7, n.relu7 = fc_relu(fc7input, 4096, param=param)
    if train:
        n.drop7 = fc8input = L.Dropout(n.relu7, in_place=True)
    else:
        fc8input = n.relu7
    # always learn fc8 (param=learned_param)
    fc8 = L.InnerProduct(fc8input, num_output=num_classes, param=learned_param)
    # give fc8 the name specified by argument `classifier_name`
    n.__setattr__(classifier_name, fc8)
    if not train:
        n.probs = L.Softmax(fc8)
    if label is not None:
        n.label = label
        n.loss = L.SoftmaxWithLoss(fc8, n.label)
        n.acc = L.Accuracy(fc8, n.label)

    mode = 'train' if train else 'test'
    filename = path_savemodel + num_fold + '_' + typeNet + '_' + mode + '.prototxt'
    with open(filename, 'w') as f:
        f.write(str(n.to_proto()))
        return filename
Example 9
def caffenet(data,
             label=None,
             train=True,
             num_classes=1000,
             classifier_name='fc8',
             learn_all=False):
    """Returns a NetSpec specifying CaffeNet, following the original proto text
       specification (./models/bvlc_reference_caffenet/train_val.prototxt)."""
    n = caffe.NetSpec()
    n.data = data
    param = learned_param if learn_all else frozen_param
    n.conv1, n.relu1 = conv_relu(n.data, 11, 96, stride=4, param=param)
    n.pool1 = max_pool(n.relu1, 3, stride=2)
    n.norm1 = L.LRN(n.pool1, local_size=5, alpha=1e-4, beta=0.75)
    n.conv2, n.relu2 = conv_relu(n.norm1, 5, 256, pad=2, group=2, param=param)
    n.pool2 = max_pool(n.relu2, 3, stride=2)
    n.norm2 = L.LRN(n.pool2, local_size=5, alpha=1e-4, beta=0.75)
    n.conv3, n.relu3 = conv_relu(n.norm2, 3, 384, pad=1, param=param)
    n.conv4, n.relu4 = conv_relu(n.relu3, 3, 384, pad=1, group=2, param=param)
    n.conv5, n.relu5 = conv_relu(n.relu4, 3, 256, pad=1, group=2, param=param)
    n.pool5 = max_pool(n.relu5, 3, stride=2)
    n.fc6, n.relu6 = fc_relu(n.pool5, 4096, param=param)
    if train:
        n.drop6 = fc7input = L.Dropout(n.relu6, in_place=True)
    else:
        fc7input = n.relu6
    n.fc7, n.relu7 = fc_relu(fc7input, 4096, param=param)
    if train:
        n.drop7 = fc8input = L.Dropout(n.relu7, in_place=True)
    else:
        fc8input = n.relu7
    # always learn fc8 (param=learned_param)
    fc8 = L.InnerProduct(fc8input, num_output=num_classes, param=learned_param)
    # give fc8 the name specified by argument `classifier_name`
    n.__setattr__(classifier_name, fc8)
    if not train:
        n.probs = L.Softmax(fc8)
    if label is not None:
        n.label = label
        n.loss = L.SoftmaxWithLoss(fc8, n.label)
        n.acc = L.Accuracy(fc8, n.label)
    # write the net to a temporary file and return its filename
    with tempfile.NamedTemporaryFile(mode='w', delete=False) as f:
        f.write(str(n.to_proto()))
        return f.name
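This caffenet (and the two other copies in this listing) assumes conv_relu, fc_relu, max_pool, and the learned_param/frozen_param globals from the fine-tuning notebook the docstring cites. A sketch consistent with that notebook; the filler values are from memory and should be treated as assumptions:

import caffe
from caffe import layers as L, params as P

weight_param = dict(lr_mult=1, decay_mult=1)
bias_param   = dict(lr_mult=2, decay_mult=0)
learned_param = [weight_param, bias_param]   # weights and biases are updated
frozen_param = [dict(lr_mult=0)] * 2         # zero learning rate: frozen

def conv_relu(bottom, ks, nout, stride=1, pad=0, group=1,
              param=learned_param,
              weight_filler=dict(type='gaussian', std=0.01),
              bias_filler=dict(type='constant', value=0.1)):
    conv = L.Convolution(bottom, kernel_size=ks, stride=stride,
                         num_output=nout, pad=pad, group=group,
                         param=param, weight_filler=weight_filler,
                         bias_filler=bias_filler)
    return conv, L.ReLU(conv, in_place=True)

def fc_relu(bottom, nout, param=learned_param,
            weight_filler=dict(type='gaussian', std=0.005),
            bias_filler=dict(type='constant', value=0.1)):
    fc = L.InnerProduct(bottom, num_output=nout, param=param,
                        weight_filler=weight_filler, bias_filler=bias_filler)
    return fc, L.ReLU(fc, in_place=True)

def max_pool(bottom, ks, stride=1):
    return L.Pooling(bottom, pool=P.Pooling.MAX, kernel_size=ks, stride=stride)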
Example 10
def nonlinear_net(hdf5, batch_size):
    # one small nonlinearity, one leap for model kind
    n = caffe.NetSpec()
    n.data, n.label = L.HDF5Data(batch_size=batch_size, source=hdf5, ntop=2)
    # define a hidden layer of dimension 40
    n.ip1 = L.InnerProduct(n.data,
                           num_output=40,
                           weight_filler=dict(type='xavier'))
    # transform the output through the ReLU (rectified linear) non-linearity
    n.relu1 = L.ReLU(n.ip1, in_place=True)
    # score the (now non-linear) features
    n.ip2 = L.InnerProduct(n.ip1,
                           num_output=2,
                           weight_filler=dict(type='xavier'))
    # same accuracy and loss as before
    n.accuracy = L.Accuracy(n.ip2, n.label)
    n.loss = L.SoftmaxWithLoss(n.ip2, n.label)
    return n.to_proto()
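HDF5Data's source argument is not an HDF5 file itself but a text file listing one .h5 path per line, and the dataset names inside each file must match the requested tops. A sketch that builds a toy input for the net above (file names and shapes are arbitrary):

import h5py
import numpy as np

X = np.random.randn(100, 4).astype(np.float32)            # 100 samples, 4 features
y = np.random.randint(0, 2, size=100).astype(np.float32)  # binary labels
with h5py.File('train.h5', 'w') as f:
    f['data'] = X      # dataset names must match the 'data'/'label' tops
    f['label'] = y
with open('train_h5_list.txt', 'w') as f:
    f.write('train.h5\n')  # pass this list file as `source`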
Example 11
    def _loss_proto(self, xPr, xLb, xSemPr, xSemLb, lCW):
        ns = self.netspec

        # Classification loss
        if self.sem_coeff < 1:
            name = 'SCoRe/objLoss'
            ns[name] = L.SoftmaxWithLoss(*[xPr, xLb],
                                         name=name,
                                         loss_weight=1.0 - self.sem_coeff,
                                         include=dict(phase=caffe.TRAIN))

        # Semantic regularization
        if self.sem_coeff > 0:
            self._semantic_regularization(xSemPr, xSemLb, self.sem_coeff)

        # Codeword regularization
        if 0 < self.code_coeff < np.inf:
            self._code_regularization(lCW)
Example 12
def fcn(split, tops):
    n = caffe.NetSpec()
    n.color, n.hha, n.label = L.Python(module='nyud_layers',
            layer='NYUDSegDataLayer', ntop=3,
            param_str=str(dict(nyud_dir='../data/nyud', split=split,
                tops=tops, seed=1337)))
    n = modality_fcn(n, 'color', 'color')
    n = modality_fcn(n, 'hha', 'hha')
    n.score_fused = L.Eltwise(n.score_frcolor, n.score_frhha,
            operation=P.Eltwise.SUM, coeff=[0.5, 0.5])
    n.upscore = L.Deconvolution(n.score_fused,
        convolution_param=dict(num_output=40, kernel_size=64, stride=32,
            bias_term=False),
        param=[dict(lr_mult=0)])
    n.score = crop(n.upscore, n.color)
    n.loss = L.SoftmaxWithLoss(n.score, n.label,
            loss_param=dict(normalize=False, ignore_label=255))
    return n.to_proto()
Example 13
def net(img_list, batch_size, mean_value=0):
    n = caffe.NetSpec()
    n.data, n.label = L.ImageData(source=img_list,
                                  batch_size=batch_size,
                                  new_width=28,
                                  new_height=28,
                                  ntop=2,
                                  transform_param=dict(scale=1 / 255.0,
                                                       mean_value=mean_value))
    n.ip1 = L.InnerProduct(n.data,
                           num_output=50,
                           weight_filler=dict(type='xavier'))
    n.relu1 = L.ReLU(n.ip1, in_place=True)
    n.ip2 = L.InnerProduct(n.relu1,
                           num_output=10,
                           weight_filler=dict(type='xavier'))
    n.loss = L.SoftmaxWithLoss(n.ip2, n.label)
    return n.to_proto()
Example 14
def lenet(batch_size):
    n = caffe.NetSpec()
    n.data, n.label = L.DummyData(shape=[dict(dim=[batch_size, 1, 28, 28]),
                                         dict(dim=[batch_size, 1, 1, 1])],
                                  transform_param=dict(scale=1./255), ntop=2)
    n.conv1 = L.Convolution(n.data, kernel_size=5, num_output=20,
        weight_filler=dict(type='xavier'))
    n.pool1 = L.Pooling(n.conv1, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    n.conv2 = L.Convolution(n.pool1, kernel_size=5, num_output=50,
        weight_filler=dict(type='xavier'))
    n.pool2 = L.Pooling(n.conv2, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    n.ip1 = L.InnerProduct(n.pool2, num_output=500,
        weight_filler=dict(type='xavier'))
    n.relu1 = L.ReLU(n.ip1, in_place=True)
    n.ip2 = L.InnerProduct(n.relu1, num_output=10,
        weight_filler=dict(type='xavier'))
    n.loss = L.SoftmaxWithLoss(n.ip2, n.label)
    return n.to_proto()
Example 15
    def gen_net(net_path, data_shape, label_shape):
        net = caffe.NetSpec()

        net.data = L.Input(shape=dict(dim=list(data_shape)))
        net.label = L.Input(shape=dict(dim=list(label_shape)))

        net.conv1 = L.Convolution(net.data,
                                  kernel_size=5,
                                  num_output=48,
                                  weight_filler=dict(type='xavier'))
        net.pool1 = L.Pooling(net.conv1,
                              kernel_size=2,
                              stride=2,
                              pool=P.Pooling.MAX)
        net.conv2 = L.Convolution(net.pool1,
                                  kernel_size=3,
                                  num_output=128,
                                  weight_filler=dict(type='xavier'))
        net.pool2 = L.Pooling(net.conv2,
                              kernel_size=2,
                              stride=2,
                              pool=P.Pooling.MAX)
        net.conv3 = L.Convolution(net.pool2,
                                  kernel_size=1,
                                  num_output=512,
                                  weight_filler=dict(type='xavier'))
        net.fc1 = L.InnerProduct(net.conv3,
                                 num_output=1000,
                                 weight_filler=dict(type='xavier'))
        net.relu1 = L.ReLU(net.fc1, in_place=True)
        net.dropout1 = L.Dropout(net.relu1, dropout_ratio=0.5)
        net.fc2 = L.InnerProduct(net.dropout1,
                                 num_output=1000,
                                 weight_filler=dict(type='xavier'))
        net.relu2 = L.ReLU(net.fc2, in_place=True)
        net.output = L.InnerProduct(net.relu2,
                                    num_output=10,
                                    weight_filler=dict(type='xavier'))

        net.loss = L.SoftmaxWithLoss(net.output, net.label)
        net.accuracy = L.Accuracy(net.output, net.label)

        with open(net_path, 'w') as f:
            f.write(str(net.to_proto()))
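Because this net takes Input layers rather than a data layer, it can be exercised by filling the blobs directly. A usage sketch, treating gen_net as a free function and using toy shapes:

import numpy as np
import caffe

gen_net('toy_net.prototxt', (1, 1, 28, 28), (1, 1, 1, 1))
net = caffe.Net('toy_net.prototxt', caffe.TEST)
net.blobs['data'].data[...] = np.random.randn(1, 1, 28, 28)
net.blobs['label'].data[...] = 0
out = net.forward()
print(out['loss'], out['accuracy'])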
Example 16
    def alexnet_bn_proto(self, batch_size, phase='TRAIN'):
        n = caffe.NetSpec()
        if phase == 'TRAIN':
            source_data = self.train_data
            mirror = True
        else:
            source_data = self.test_data
            mirror = False
        n.data, n.label = L.Data(source=source_data, backend=P.Data.LMDB, batch_size=batch_size, ntop=2,
                                 transform_param=dict(crop_size=227, mean_value=[104, 117, 123], mirror=mirror))

        n.conv1, n.conv1_bn, n.conv1_scale, n.conv1_relu = \
            factorization_conv_bn_scale_relu(n.data, num_output=96, kernel_size=11, stride=4,)  # 96x55x55
        n.pool1 = L.Pooling(n.conv1, kernel_size=3, stride=2, pool=P.Pooling.MAX)  # 96x27x27

        n.conv2, n.conv2_bn, n.conv2_scale, n.conv2_relu = \
            factorization_conv_bn_scale_relu(n.pool1, num_output=256, kernel_size=5, pad=2)  # 256x27x27
        n.pool2 = L.Pooling(n.conv2, kernel_size=3, stride=2, pool=P.Pooling.MAX)  # 256x13x13

        n.conv3, n.conv3_bn, n.conv3_scale, n.conv3_relu = \
            factorization_conv_bn_scale_relu(n.pool2, num_output=384, kernel_size=3, pad=1)  # 384x13x13

        n.conv4, n.conv4_bn, n.conv4_scale, n.conv4_relu = \
            factorization_conv_bn_scale_relu(n.conv3, num_output=384, kernel_size=3, pad=1)  # 384x13x13

        n.conv5, n.conv5_bn, n.conv5_scale, n.conv5_relu = \
            factorization_conv_bn_scale_relu(n.conv4, num_output=256, kernel_size=3, pad=1)  # 256x13x13
        n.pool5 = L.Pooling(n.conv5, kernel_size=3, stride=2, pool=P.Pooling.MAX)  # 256x6x6

        n.fc6, n.relu6, n.drop6 = fc_relu_drop(n.pool5, num_output=4096)  # 4096x1x1
        n.fc7, n.relu7, n.drop7 = fc_relu_drop(n.fc6, num_output=4096)  # 4096x1x1
        n.fc8 = L.InnerProduct(n.fc7, num_output=self.classifier_num,
                               param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                               weight_filler=dict(type='gaussian', std=0.01),
                               bias_filler=dict(type='constant', value=0))
        if phase != 'TRAIN':
            n.accuracy_top1 = L.Accuracy(n.fc8, n.label, include=dict(phase=1))
            n.accuracy_top5 = L.Accuracy(n.fc8, n.label, include=dict(phase=1),
                                         accuracy_param=dict(top_k=5))
        n.loss = L.SoftmaxWithLoss(n.fc8, n.label)
        
        return n.to_proto()
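The factorization_conv_bn_scale_relu and fc_relu_drop helpers this method unpacks are not shown. A sketch of definitions consistent with how they are used above (Conv -> BatchNorm -> Scale -> ReLU, and InnerProduct -> ReLU -> Dropout); parameter choices are assumptions:

import caffe
from caffe import layers as L, params as P

def factorization_conv_bn_scale_relu(bottom, num_output=64, kernel_size=3,
                                     stride=1, pad=0):
    # returns all four tops so the caller can name them individually
    conv = L.Convolution(bottom, num_output=num_output, kernel_size=kernel_size,
                         stride=stride, pad=pad, bias_term=False,
                         weight_filler=dict(type='xavier'))
    conv_bn = L.BatchNorm(conv, use_global_stats=False, in_place=True)
    conv_scale = L.Scale(conv_bn, bias_term=True, in_place=True)
    conv_relu = L.ReLU(conv_scale, in_place=True)
    return conv, conv_bn, conv_scale, conv_relu

def fc_relu_drop(bottom, num_output=4096, dropout_ratio=0.5):
    fc = L.InnerProduct(bottom, num_output=num_output,
                        param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                        weight_filler=dict(type='gaussian', std=0.005),
                        bias_filler=dict(type='constant', value=0.1))
    relu = L.ReLU(fc, in_place=True)
    drop = L.Dropout(fc, dropout_ratio=dropout_ratio, in_place=True)
    return fc, relu, drop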
Example 17
def lenet(param):
    n = caffe.NetSpec()
    n.data, n.label = L.Python(module='wwt_data_layer',
                               layer='wwtdatalayer',
                               ntop=2,
                               param_str=str(param))
    n.bn1 = L.BatchNorm(n.data,
                        batch_norm_param=dict(moving_average_fraction=0.90,
                                              use_global_stats=False,
                                              eps=1e-5),
                        in_place=True)

    n.conv1 = L.Convolution(n.bn1,
                            kernel_size=5,
                            num_output=64,
                            weight_filler=dict(type='xavier'))
    n.relu1 = L.ReLU(n.conv1, in_place=True)
    n.pool1 = L.Pooling(n.relu1, kernel_size=3, stride=2, pool=P.Pooling.MAX)
    n.relu2 = L.ReLU(n.pool1, in_place=True)

    n.conv2 = L.Convolution(n.relu2,
                            kernel_size=5,
                            num_output=64,
                            weight_filler=dict(type='xavier'))
    n.relu3 = L.ReLU(n.conv2, in_place=True)
    n.pool3 = L.Pooling(n.relu3, kernel_size=3, stride=2, pool=P.Pooling.MAX)
    n.relu4 = L.ReLU(n.pool3, in_place=True)

    n.fc1 = L.InnerProduct(n.relu4,
                           num_output=384,
                           weight_filler=dict(type='xavier'))
    n.relu5 = L.ReLU(n.fc1, in_place=True)

    n.fc2 = L.InnerProduct(n.relu5,
                           num_output=192,
                           weight_filler=dict(type='xavier'))
    n.relu6 = L.ReLU(n.fc2, in_place=True)

    n.score = L.InnerProduct(n.relu6,
                             num_output=10,
                             weight_filler=dict(type='xavier'))
    n.loss = L.SoftmaxWithLoss(n.score, n.label)

    return n.to_proto()
Example 18
def create_net(lmdb, batch_size, include_acc=False):
    # Layer 1: data layer. Feeds two tops upward: the image data and the matching labels
    data, label = L.Data(source=lmdb,
                         backend=P.Data.LMDB,
                         batch_size=batch_size,
                         ntop=2,
                         transform_param=dict(crop_size=40, mirror=True))
    # Layer 2: convolution layer
    conv1 = L.Convolution(data,
                          kernel_size=5,
                          stride=1,
                          num_output=16,
                          pad=2,
                          weight_filler=dict(type='xavier'))
    # Activation layer
    relu1 = L.ReLU(conv1, in_place=True)
    # Pooling layer
    pool1 = L.Pooling(relu1, pool=P.Pooling.MAX, kernel_size=3, stride=2)
    conv2 = L.Convolution(pool1,
                          kernel_size=3,
                          stride=1,
                          num_output=32,
                          pad=1,
                          weight_filler=dict(type='xavier'))
    relu2 = L.ReLU(conv2, in_place=True)
    pool2 = L.Pooling(relu2, pool=P.Pooling.MAX, kernel_size=3, stride=2)
    # Fully connected layer
    fc3 = L.InnerProduct(pool2,
                         num_output=1024,
                         weight_filler=dict(type='xavier'))
    relu3 = L.ReLU(fc3, in_place=True)
    # Dropout layer
    drop3 = L.Dropout(relu3, in_place=True)
    fc4 = L.InnerProduct(drop3,
                         num_output=10,
                         weight_filler=dict(type='xavier'))
    # Softmax loss layer
    loss = L.SoftmaxWithLoss(fc4, label)

    if include_acc:  # the accuracy layer is not needed for training, but it is for validation
        acc = L.Accuracy(fc4, label)
        return to_proto(loss, acc)
    else:
        return to_proto(loss)
Example 19
def caffenet(data,
             label=None,
             train=True,
             num_classes=1000,
             classifier_name='fc8',
             learn_all=False):
    n = caffe.NetSpec()
    n.data = data
    param = learned_param if learn_all else frozen_param
    n.conv1, n.relu1 = conv_relu(n.data, 11, 96, stride=4, param=param)
    n.pool1 = max_pool(n.relu1, 3, stride=2)
    n.norm1 = L.LRN(n.pool1, local_size=5, alpha=1e-4, beta=0.75)
    n.conv2, n.relu2 = conv_relu(n.norm1, 5, 256, pad=2, group=2, param=param)
    n.pool2 = max_pool(n.relu2, 3, stride=2)
    n.norm2 = L.LRN(n.pool2, local_size=5, alpha=1e-4, beta=.75)
    n.conv3, n.relu3 = conv_relu(n.norm2, 3, 384, pad=1, param=param)
    n.conv4, n.relu4 = conv_relu(n.relu3, 3, 384, pad=1, group=2, param=param)
    n.conv5, n.relu5 = conv_relu(n.relu4, 3, 256, pad=1, group=2, param=param)
    n.pool5 = max_pool(n.relu5, 3, stride=2)
    n.fc6, n.relu6 = fc_relu(n.pool5, 4096, param=param)

    if train:
        n.drop6 = fc7input = L.Dropout(n.relu6, in_place=True)
    else:
        fc7input = n.relu6
    n.fc7, n.relu7 = fc_relu(fc7input, 4096, param=param)
    if train:
        n.drop7 = fc8input = L.Dropout(n.relu7, in_place=True)
    else:
        fc8input = n.relu7
    fc8 = L.InnerProduct(fc8input, num_output=num_classes, param=learned_param)

    n.__setattr__(classifier_name, fc8)
    if not train:
        n.probs = L.Softmax(fc8)
    if label is not None:
        n.label = label
        n.loss = L.SoftmaxWithLoss(fc8, n.label)
        n.acc = L.Accuracy(fc8, n.label)

    with tempfile.NamedTemporaryFile(mode='w', delete=False) as f:
        f.write(str(n.to_proto()))
        return f.name
Example 20
    def gen_net(net_path, data_shape, label_shape):
        net = caffe.NetSpec()

        net.data = L.Input(shape=dict(dim=list(data_shape)))
        net.label = L.Input(shape=dict(dim=list(label_shape)))

        net.fc0 = L.InnerProduct(net.data,
                                 num_output=30,
                                 weight_filler=dict(type='xavier'))
        net.relu0 = L.ReLU(net.fc0, in_place=True)
        net.output = L.InnerProduct(net.relu0,
                                    num_output=10,
                                    weight_filler=dict(type='xavier'))

        net.loss = L.SoftmaxWithLoss(net.output, net.label)
        net.accuracy = L.Accuracy(net.output, net.label)

        with open(net_path, 'w') as f:
            f.write(str(net.to_proto()))
Example 21
def colorization_net(train_data, test_data, batch_size):
    net = caffe.NetSpec()
    net.data = L.Data(batch_size=batch_size,
                      backend=P.Data.LMDB,
                      source=train_data,
                      phase=caffe.TRAIN)
    net.conv1 = L.Convolution(net.data,
                              kernel_size=5,
                              num_output=20,
                              weight_filler=dict(type='xavier'))
    net.pool1 = L.Pooling(net.conv1,
                          kernel_size=2,
                          stride=2,
                          pool=P.Pooling.MAX)
    net.ip1 = L.InnerProduct(net.pool1,
                             num_output=10,
                             weight_filler=dict(type='xavier'))
    # note: as written, the loss reuses the data blob as its target, and test_data is never consumed
    net.loss = L.SoftmaxWithLoss(net.ip1, net.data)
    return net.to_proto()
Example 22
def classificationNet(h5, batch_size, layerNeuronNum, layerNum, classNum,
                      learned_param):
    n = caffe.NetSpec()

    n.data, n.label = L.HDF5Data(source=h5,
                                 batch_size=batch_size,
                                 shuffle=True,
                                 ntop=2)
    flatdata = L.Flatten(n.data)
    flatdata_name = 'flatdata'
    n.__setattr__(flatdata_name, flatdata)

    param = learned_param
    for l in range(layerNum):
        if l == 0:
            encoder_name_last = flatdata_name
        else:
            encoder_name_last = relu_en_name

        encoder = L.InnerProduct(n[encoder_name_last],
                                 num_output=layerNeuronNum[l + 1],
                                 param=param,
                                 weight_filler=dict(type='gaussian',
                                                    std=0.005),
                                 bias_filler=dict(type='constant', value=0.1))
        encoder_name = 'encoder' + str(l + 1)
        n.__setattr__(encoder_name, encoder)

        relu_en = L.ReLU(n[encoder_name], in_place=True)
        relu_en_name = 'relu_en' + str(l + 1)
        n.__setattr__(relu_en_name, relu_en)

    output = L.InnerProduct(n[relu_en_name],
                            num_output=classNum,
                            param=param,
                            weight_filler=dict(type='gaussian', std=0.005),
                            bias_filler=dict(type='constant', value=0.1))
    output_name = 'output'
    n.__setattr__(output_name, output)

    n.loss = L.SoftmaxWithLoss(n[output_name], n.label)

    return n.to_proto()
Example 23
def mknet(lmdb, batch_size, gg_max):
    n = caffe.NetSpec()
    n.data, n.label = L.Data(batch_size=batch_size,
                             backend=P.Data.LMDB,
                             source=lmdb,
                             transform_param=dict(scale=1. / 255),
                             ntop=2)
    n.fc1 = L.InnerProduct(n.data,
                           num_output=128,
                           weight_filler=dict(type='gaussian', std=gg_max),
                           bias_filler=dict(type='constant'))
    n.relu1 = L.ReLU(n.fc1, in_place=True)
    n.fc2 = L.InnerProduct(n.relu1,
                           num_output=10,
                           weight_filler=dict(type='gaussian', std=gg_max),
                           bias_filler=dict(type='constant'))
    n.loss = L.SoftmaxWithLoss(n.fc2, n.label)
    n.acc = L.Accuracy(n.fc2, n.label)
    return n.to_proto()
Example 24
def net(img_list, batch_size, mean_value=0):
    n = caffe.NetSpec()
    n.data, n.label = L.ImageData(source=img_list,
                                  batch_size=batch_size,
                                  new_width=28,
                                  new_height=28,
                                  ntop=2,
                                  transform_param=dict(mean_value=mean_value))
    n.ip1 = L.InnerProduct(n.data,
                           num_output=50,
                           weight_filler=dict(type='xavier'))
    n.relu1 = L.ReLU(n.ip1, in_place=True)
    n.ip2 = L.InnerProduct(n.relu1,
                           num_output=4,
                           weight_filler=dict(type='xavier'))
    n.relu2 = L.ReLU(n.ip2, in_place=True)
    n.loss = L.SoftmaxWithLoss(n.relu2, n.label)
    n.accu = L.Accuracy(n.relu2, n.label, include={'phase': caffe.TEST})
    return n.to_proto()
Example 25
def create_net(lmdb, batch_size, net_typeflg=0):
    n = caffe.NetSpec()
    # Layer 1: data layer. Feeds two tops upward: the image data and the matching labels
    # data, label = L.Data(source=lmdb, backend=P.Data.LMDB, batch_size=batch_size, ntop=2,
    #     transform_param=dict(crop_size=40,mean_file=mean_file,mirror=True))
    if net_typeflg != NET_TYPE_PREDICT:
        n.data, n.label = L.Data(source=lmdb, backend=P.Data.LMDB, batch_size=batch_size, ntop=2,
                                 transform_param=dict(scale=1./255))
        # Layer 2: convolution layer
        n.conv1 = L.Convolution(n.data, kernel_size=5, stride=1, num_output=20, weight_filler=dict(type='xavier'))
    else:
        n.conv1 = L.Convolution(bottom='data', kernel_size=5, stride=1, num_output=20, weight_filler=dict(type='xavier'))

    # Activation layer
    n.relu1 = L.ReLU(n.conv1, in_place=True)
    # Pooling layer
    n.pool1 = L.Pooling(n.relu1, pool=P.Pooling.MAX, kernel_size=2, stride=2)

    n.conv2 = L.Convolution(n.pool1, kernel_size=5, stride=1, num_output=50, weight_filler=dict(type='xavier'))
    n.relu2 = L.ReLU(n.conv2, in_place=True)
    n.pool2 = L.Pooling(n.relu2, pool=P.Pooling.MAX, kernel_size=2, stride=2)

    # Fully connected layer
    n.fc3 = L.InnerProduct(n.pool2, num_output=500, weight_filler=dict(type='xavier'))
    n.relu3 = L.ReLU(n.fc3, in_place=True)
    # Dropout layer
    n.drop3 = L.Dropout(n.relu3, in_place=True)
    n.fc4 = L.InnerProduct(n.drop3, num_output=10, weight_filler=dict(type='xavier'))
    # Softmax loss layer
    if net_typeflg != NET_TYPE_PREDICT:
        n.loss = L.SoftmaxWithLoss(n.fc4, n.label)

    # The accuracy layer is not needed for training, but it is for validation
    if net_typeflg == NET_TYPE_TRAIN:
        return n.to_proto()
    elif net_typeflg == NET_TYPE_TEST:
        n.score = n.fc4
        # n.acc = L.Accuracy(n.fc4, n.label)
        return n.to_proto()
    elif net_typeflg == NET_TYPE_PREDICT:
        n.result = L.Softmax(n.fc4)
        return n.to_proto()
Example 26
def caffe_net(lmdb, mean_file, batch_size=24, phase=False):
    data, label = L.Data(
        source=lmdb,
        backend=P.Data.LMDB,
        batch_size=batch_size,
        ntop=2,
        transform_param=dict(crop_size=32, mean_file=mean_file, mirror=phase))
    fractal_unit = fractal_block(data, 64, phase, 4)
    fractal_unit = fractal_block(fractal_unit, 128, phase, 4)
    fractal_unit = fractal_block(fractal_unit, 256, phase, 4)
    fractal_unit = fractal_block(fractal_unit, 512, phase, 4)
    fractal_unit = fractal_block(fractal_unit, 512, phase, 4)
    fc = caffe_net_fun.full_connect(fractal_unit, 10)
    loss = L.SoftmaxWithLoss(fc, label)
    if not phase:
        acc = L.Accuracy(fc, label)
        return to_proto(loss, acc)
    else:
        return to_proto(loss)
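fractal_block here comes from an external helper module. A simplified sketch of the fractal expansion rule it implements, f_{C+1}(x) = join(f_C(f_C(x)), conv(x)) with an element-wise mean as the join; real FractalNet also applies drop-path (which would use the phase argument) and inter-block pooling, both omitted here:

import caffe
from caffe import layers as L, params as P

def conv_bn_relu(bottom, nout):
    conv = L.Convolution(bottom, kernel_size=3, pad=1, num_output=nout,
                         bias_term=False, weight_filler=dict(type='msra'))
    bn = L.BatchNorm(conv, in_place=True)
    scale = L.Scale(bn, bias_term=True, in_place=True)
    return L.ReLU(scale, in_place=True)

def fractal_block(bottom, nout, phase, cols):
    # base case: a single conv column
    if cols == 1:
        return conv_bn_relu(bottom, nout)
    # deep path: the (cols-1)-column block applied twice in sequence
    deep = fractal_block(fractal_block(bottom, nout, phase, cols - 1),
                         nout, phase, cols - 1)
    # shallow path: one conv, joined with the deep path by an element-wise mean
    shallow = conv_bn_relu(bottom, nout)
    return L.Eltwise(deep, shallow, operation=P.Eltwise.SUM, coeff=[0.5, 0.5])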
Example 27
def caffenet(lmdb, batch_size=32, include_acc=False):
    print ("building net")

    data, label = L.Data(source=lmdb, backend=P.Data.LMDB, batch_size=batch_size, ntop=2,
                         transform_param=dict(crop_size=84, mean_value=[104, 117, 123], mirror=True))
    
    # the net itself
    conv1, relu1 = conv_relu(data, 8, 32, stride=4)
    conv2, relu2 = conv_relu(relu1, 4, 16, stride=2)
    ip1 = L.InnerProduct(relu2, num_output=256)
    relu3 = L.ReLU(ip1, in_place=True)
    ip2 = L.InnerProduct(relu3, num_output=64)
    loss = L.SoftmaxWithLoss(ip2, label)

    if include_acc:
        acc = L.Accuracy(ip2, label)
        return to_proto(loss, acc)
    else:
        return to_proto(loss)
Example 28
def residual_net(total_depth, data_layer_params, num_classes = 1000, acclayer = True):
    """
    Generates nets from "Deep Residual Learning for Image Recognition". Nets follow architectures outlined in Table 1. 
    """
    # figure out network structure
    net_defs = {
        18:([2, 2, 2, 2], "standard"),
        34:([3, 4, 6, 3], "standard"),
        50:([3, 4, 6, 3], "bottleneck"),
        101:([3, 4, 23, 3], "bottleneck"),
        152:([3, 8, 36, 3], "bottleneck"),
    }
    assert total_depth in net_defs.keys(), "net of depth:{} not defined".format(total_depth)

    nunits_list, unit_type = net_defs[total_depth] # nunits_list gives the number of units at each depth
    nouts = [64, 128, 256, 512] # same for all nets

    # setup the first couple of layers
    n = caffe.NetSpec()
    n.data, n.label = L.Python(module = 'beijbom_caffe_data_layers', layer = 'ImageNetDataLayer',
                ntop = 2, param_str=str(data_layer_params))
    n.conv1, n.bn1, n.lrn1 = conv_bn(n.data, ks = 7, stride = 2, nout = 64, pad = 3)
    n.relu1 = L.ReLU(n.lrn1, in_place=True)
    n.pool1 = L.Pooling(n.relu1, stride = 2, kernel_size = 3)
    
    # make the convolutional body
    for nout, nunits in zip(nouts, nunits_list): # for each depth and nunits
        for unit in range(1, nunits + 1): # for each unit. Enumerate from 1.
            s = str(nout) + '_' + str(unit) + '_' # layer name prefix
            if unit_type == "standard":
                residual_standard_unit(n, nout, s, newdepth=(unit == 1 and nout > 64))
            else:
                residual_bottleneck_unit(n, nout, s, newdepth=(unit == 1))
                
    # add the end layers                    
    n.global_pool = L.Pooling(n.tops[list(n.tops.keys())[-1]], pooling_param = dict(pool = 1, global_pooling = True))
    n.score = L.InnerProduct(n.global_pool, num_output = num_classes,
        param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)])
    n.loss = L.SoftmaxWithLoss(n.score, n.label)
    if acclayer:
        n.accuracy = L.Accuracy(n.score, n.label)

    return n            
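conv_bn (along with the residual unit builders) lives in the author's beijbom_caffe_tools module; a sketch matching the three-top unpacking used above, with the details treated as assumptions:

import caffe
from caffe import layers as L

def conv_bn(bottom, ks, nout, stride=1, pad=0):
    # Conv -> BatchNorm -> Scale, returned as three tops to match the
    # `n.conv1, n.bn1, n.lrn1 = conv_bn(...)` unpacking above
    conv = L.Convolution(bottom, kernel_size=ks, stride=stride, num_output=nout,
                         pad=pad, bias_term=False,
                         param=[dict(lr_mult=1, decay_mult=1)],
                         weight_filler=dict(type='msra'))
    bn = L.BatchNorm(conv, in_place=True)
    scale = L.Scale(bn, bias_term=True, in_place=True)
    return conv, bn, scale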
Example 29
def fcn(split):
    n = caffe.NetSpec()
    pydata_params = dict(split=split, mean=(104.00699, 116.66877, 122.67892),
            seed=1337)
    if split == 'train':
        pydata_params['sbdd_dir'] = '../data/sbdd/dataset'
        pylayer = 'SBDDSegDataLayer'
    else:
        pydata_params['voc_dir'] = '../data/pascal/VOC2011'
        pylayer = 'VOCSegDataLayer'
    n.data, n.label = L.Python(module='voc_layers', layer=pylayer,
            ntop=2, param_str=str(pydata_params))

    # the base net
    n.conv1, n.relu1 = conv_relu(n.data, 11, 96, stride=4, pad=100)
    n.pool1 = max_pool(n.relu1, 3, stride=2)
    n.norm1 = L.LRN(n.pool1, local_size=5, alpha=1e-4, beta=0.75)
    n.conv2, n.relu2 = conv_relu(n.norm1, 5, 256, pad=2, group=2)
    n.pool2 = max_pool(n.relu2, 3, stride=2)
    n.norm2 = L.LRN(n.pool2, local_size=5, alpha=1e-4, beta=0.75)
    n.conv3, n.relu3 = conv_relu(n.norm2, 3, 384, pad=1)
    n.conv4, n.relu4 = conv_relu(n.relu3, 3, 384, pad=1, group=2)
    n.conv5, n.relu5 = conv_relu(n.relu4, 3, 256, pad=1, group=2)
    n.pool5 = max_pool(n.relu5, 3, stride=2)

    # fully conv
    n.fc6, n.relu6 = conv_relu(n.pool5, 6, 4096)
    n.drop6 = L.Dropout(n.relu6, dropout_ratio=0.5, in_place=True)
    n.fc7, n.relu7 = conv_relu(n.drop6, 1, 4096)
    n.drop7 = L.Dropout(n.relu7, dropout_ratio=0.5, in_place=True)

    n.score_fr = L.Convolution(n.drop7, num_output=21, kernel_size=1, pad=0,
        param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)])
    n.upscore = L.Deconvolution(n.score_fr,
        convolution_param=dict(num_output=21, kernel_size=63, stride=32,
            bias_term=False),
        param=[dict(lr_mult=0)])
    n.score = crop(n.upscore, n.data)
    n.loss = L.SoftmaxWithLoss(n.score, n.label,
            loss_param=dict(normalize=True, ignore_label=255))

    return n.to_proto()
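The crop helper in both FCN examples comes from caffe.coord_map in the FCN reference code; it derives the Crop layer's offset automatically from the coordinate maps of the two tops. A fixed-offset stand-in for the standard pad=100 FCN-32s geometry, where the usual offset is 19 (treat that constant as an assumption and prefer the coord_map-based helper when available):

import caffe
from caffe import layers as L

def crop(upsampled, reference, offset=19):
    # crop the upsampled score map back to the reference blob's spatial size
    return L.Crop(upsampled, reference, axis=2, offset=offset)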
Example 30
def logreg(hdf5, batch_size):
    # read in the data
    n = caffe.NetSpec()
    n.data, n.label = L.HDF5Data(batch_size=batch_size, source=hdf5, ntop=2)
    # a bit of preprocessing - helpful!
    n.log = L.Log(n.data, base=-1, scale=1, shift=1)
    n.norm = L.BatchNorm(n.log, use_global_stats=False)
    n.scaled = L.Scale(n.norm, bias_term=True)
    # the actual regression - the core of what we want to do!
    n.dropout = L.Dropout(n.scaled, dropout_ratio=0.5)
    n.ip = L.InnerProduct(n.dropout,
                          num_output=nCategories,
                          weight_filler=dict(type='xavier'))
    # don't mess with these. They don't affect learning.
    n.prob = L.Softmax(n.ip)
    n.accuracy1 = L.Accuracy(n.prob, n.label)
    if nCategories > 5:
        n.accuracy5 = L.Accuracy(n.prob, n.label, top_k=5)
    n.loss = L.SoftmaxWithLoss(n.ip, n.label)
    return n.to_proto()