Code example #1
def caffenet_multilabel(data_layer_params, datalayer):
    # setup the python data layer
    n = caffe.NetSpec()
    n.data, n.label = L.Python(module='pascal_multilabel_datalayers',
                               layer=datalayer,
                               ntop=2,
                               param_str=str(data_layer_params))

    # the net itself
    n.conv1, n.relu1 = conv_relu(n.data, 11, 96, stride=4)
    n.pool1 = max_pool(n.relu1, 3, stride=2)
    n.norm1 = L.LRN(n.pool1, local_size=5, alpha=1e-4, beta=0.75)
    n.conv2, n.relu2 = conv_relu(n.norm1, 5, 256, pad=2, group=2)
    n.pool2 = max_pool(n.relu2, 3, stride=2)
    n.norm2 = L.LRN(n.pool2, local_size=5, alpha=1e-4, beta=0.75)
    n.conv3, n.relu3 = conv_relu(n.norm2, 3, 384, pad=1)
    n.conv4, n.relu4 = conv_relu(n.relu3, 3, 384, pad=1, group=2)
    n.conv5, n.relu5 = conv_relu(n.relu4, 3, 256, pad=1, group=2)
    n.pool5 = max_pool(n.relu5, 3, stride=2)
    n.fc6, n.relu6 = fc_relu(n.pool5, 4096)
    n.drop6 = L.Dropout(n.relu6, in_place=True)
    n.fc7, n.relu7 = fc_relu(n.drop6, 4096)
    n.drop7 = L.Dropout(n.relu7, in_place=True)
    n.score = L.InnerProduct(n.drop7, num_output=20)
    n.loss = L.SigmoidCrossEntropyLoss(n.score, n.label)

    return str(n.to_proto())
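
Most of the snippets on this page call conv_relu, fc_relu, and max_pool helpers defined elsewhere in their source files. For reference, here is a minimal sketch in the style of caffe/examples/pycaffe/caffenet.py; the exact fillers and extra keyword arguments (param, learn, is_train, ...) vary per project, so treat this as an assumption rather than the original code:

from caffe import layers as L, params as P

def conv_relu(bottom, ks, nout, stride=1, pad=0, group=1):
    # Convolution followed by an in-place ReLU.
    conv = L.Convolution(bottom, kernel_size=ks, stride=stride,
                         num_output=nout, pad=pad, group=group)
    return conv, L.ReLU(conv, in_place=True)

def fc_relu(bottom, nout):
    # Fully connected layer followed by an in-place ReLU.
    fc = L.InnerProduct(bottom, num_output=nout)
    return fc, L.ReLU(fc, in_place=True)

def max_pool(bottom, ks, stride=1):
    return L.Pooling(bottom, pool=P.Pooling.MAX, kernel_size=ks, stride=stride)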
Code example #2
def acn(source, loss_param, batch_size, include, shuffle, mirror):
    n = caffe.NetSpec()
    n.data, n.label = L.MultiImageData(include=include,
                                       transform_param=dict(mirror=mirror, crop_size=227,
                                                            mean_value=[104, 117, 123]),
                                       multiimage_data_param=dict(root_folder="./../../data/RAP_dataset/",
                                                                  shuffle=shuffle,
                                                                  batch_size=batch_size,
                                                                  source=source,
                                                                  new_height=256,
                                                                  new_width=256),
                                       ntop=2)

    n.conv1 = L.Convolution(n.data, kernel_size=11, num_output=96, stride=4, pad=0, group=1,
                            param=[dict(name='conv1_w', lr_mult=1, decay_mult=1),
                                   dict(name='conv1_b', lr_mult=2, decay_mult=0)])
    n.relu1 = L.ReLU(n.conv1, in_place=True)
    n.pool1 = L.Pooling(n.relu1, pool=P.Pooling.MAX, kernel_size=3, stride=2)
    n.norm1 = L.LRN(n.pool1, local_size=5, alpha=1e-4, beta=0.75)

    n.conv2 = L.Convolution(n.norm1, kernel_size=5, num_output=256, stride=1, pad=2, group=2,
                            param=[dict(name='conv2_w', lr_mult=1, decay_mult=1),
                                   dict(name='conv2_b', lr_mult=2, decay_mult=0)])
    n.relu2 = L.ReLU(n.conv2, in_place=True)
    n.pool2 = L.Pooling(n.relu2, pool=P.Pooling.MAX, kernel_size=3, stride=2)
    n.norm2 = L.LRN(n.pool2, local_size=5, alpha=1e-4, beta=0.75)

    n.conv3 = L.Convolution(n.norm2, kernel_size=3, num_output=384, stride=1, pad=1, group=1,
                            param=[dict(name='conv3_w', lr_mult=1, decay_mult=1),
                                   dict(name='conv3_b', lr_mult=2, decay_mult=0)])
    n.relu3 = L.ReLU(n.conv3, in_place=True)

    n.conv4 = L.Convolution(n.relu3, kernel_size=3, num_output=384, stride=1, pad=1, group=2,
                            param=[dict(name='conv4_w', lr_mult=1, decay_mult=1),
                                   dict(name='conv4_b', lr_mult=2, decay_mult=0)])
    n.relu4 = L.ReLU(n.conv4, in_place=True)

    n.conv5 = L.Convolution(n.relu4, kernel_size=3, num_output=256, stride=1, pad=1, group=2,
                            param=[dict(name='conv5_w', lr_mult=1, decay_mult=1),
                                   dict(name='conv5_b', lr_mult=2, decay_mult=0)])
    n.relu5 = L.ReLU(n.conv5, in_place=True)
    n.pool5 = L.Pooling(n.relu5, pool=P.Pooling.MAX, kernel_size=3, stride=2)
    n.fc6 = L.InnerProduct(n.pool5, num_output=4096,
                           param=[dict(name='fc6_w', lr_mult=1, decay_mult=1),
                                  dict(name='fc6_b', lr_mult=2, decay_mult=0)],
                           weight_filler=dict(type='gaussian', std=0.01), bias_filler=dict(type='constant', value=0))
    n.relu6 = L.ReLU(n.fc6, in_place=True)
    n.drop6 = L.Dropout(n.relu6, in_place=True)
    n.fc7 = L.InnerProduct(n.drop6, num_output=4096,
                           param=[dict(name='fc7_w', lr_mult=1, decay_mult=1),
                                  dict(name='fc7_b', lr_mult=2, decay_mult=0)],
                           weight_filler=dict(type='gaussian', std=0.01), bias_filler=dict(type='constant', value=0))
    n.relu7 = L.ReLU(n.fc7, in_place=True)
    n.drop7 = L.Dropout(n.relu7, in_place=True)
    n.fchatt = L.InnerProduct(n.drop7, num_output=64,
                           param=[dict(name='fcatt_w', lr_mult=1, decay_mult=1),
                                  dict(name='fcatt_b', lr_mult=2, decay_mult=0)],
                           weight_filler=dict(type='gaussian', std=0.01), bias_filler=dict(type='constant', value=0))
    n.fine_fc8 = L.InnerProduct(n.fchatt, num_output=1,
                                param=[dict(name='fc8_body_w', lr_mult=1, decay_mult=1),
                                       dict(name='fc8_body_b', lr_mult=2, decay_mult=0)],
                                weight_filler=dict(type='gaussian', std=0.01), bias_filler=dict(type='constant', value=0))
    n.multilabel_loss = L.MultiLabelLoss(n.fine_fc8, n.label, loss_weight=1.0)
    n.multilabel_accuracy = L.MultiLabelAccuracy(n.fine_fc8, n.label)

    return n.to_proto()
Code example #3
def caffenet(n, nclasses, acclayer=False, softmax=False, learn=True):
    # the input NetSpec 'n' must already define a data layer: n.data

    n.conv1, n.relu1 = conv_relu(n.data, 11, 96, stride=4, learn=learn)
    n.pool1 = max_pool(n.relu1, 3, stride=2)
    n.norm1 = L.LRN(n.pool1, local_size=5, alpha=1e-4, beta=0.75)
    n.conv2, n.relu2 = conv_relu(n.norm1, 5, 256, pad=2, group=2, learn=learn)
    n.pool2 = max_pool(n.relu2, 3, stride=2)
    n.norm2 = L.LRN(n.pool2, local_size=5, alpha=1e-4, beta=0.75)
    n.conv3, n.relu3 = conv_relu(n.norm2, 3, 384, pad=1, learn=learn)
    n.conv4, n.relu4 = conv_relu(n.relu3, 3, 384, pad=1, group=2, learn=learn)
    n.conv5, n.relu5 = conv_relu(n.relu4, 3, 256, pad=1, group=2, learn=learn)
    n.pool5 = max_pool(n.relu5, 3, stride=2)
    n.fc6, n.relu6 = fc_relu(n.pool5, 4096, learn=learn)
    n.drop6 = L.Dropout(n.relu6, in_place=True)
    n.fc7, n.relu7 = fc_relu(n.drop6, 4096, learn=learn)
    n.drop7 = L.Dropout(n.relu7, in_place=True)

    n.score = L.InnerProduct(
        n.drop7,
        num_output=nclasses,
        param=[dict(lr_mult=5, decay_mult=1),
               dict(lr_mult=10, decay_mult=0)])

    if softmax:
        n.loss = L.SoftmaxWithLoss(n.score, n.label)

    if acclayer:
        n.acc = L.Accuracy(n.score, n.label)
    return n
Code example #4
def mknet(lmdb, mean_file, batch_size, gg_max):
    n = caffe.NetSpec()
    n.data, n.label = L.Data(batch_size=batch_size, backend=P.Data.LMDB, source=lmdb,
                             transform_param=dict(mean_file=mean_file, scale=1. / 128), ntop=2)
    n.conv1 = L.Convolution(n.data, kernel_size=5, num_output=32, pad=2, stride=1,
                            weight_filler=dict(type='gaussian', std=gg_max),
                            bias_filler=dict(type='constant'))
    n.relu1 = L.ReLU(n.conv1, in_place=True)
    n.pool1 = L.Pooling(n.relu1, kernel_size=3, stride=2, pool=P.Pooling.MAX)
    n.norm1 = L.LRN(n.pool1, local_size=3, alpha=5e-5, beta=0.75, norm_region=1)
    n.conv2 = L.Convolution(n.norm1, kernel_size=5, num_output=32, pad=2, stride=1,
                            weight_filler=dict(type='gaussian', std=gg_max),
                            bias_filler=dict(type='constant'))
    n.relu2 = L.ReLU(n.conv2, in_place=True)
    n.pool2 = L.Pooling(n.relu2, kernel_size=3, stride=2, pool=P.Pooling.AVE)
    n.norm2 = L.LRN(n.pool2, local_size=3, alpha=5e-5, beta=0.75, norm_region=1)
    n.conv3 = L.Convolution(n.norm2, kernel_size=5, num_output=64, pad=2, stride=1,
                            weight_filler=dict(type='gaussian', std=gg_max),
                            bias_filler=dict(type='constant'))
    n.relu3 = L.ReLU(n.conv3, in_place=True)
    n.pool3 = L.Pooling(n.relu3, kernel_size=3, stride=2, pool=P.Pooling.AVE)
    n.fc1 = L.InnerProduct(n.pool3, num_output=10,
                           weight_filler=dict(type='gaussian', std=gg_max),
                           bias_filler=dict(type='constant'))
    n.loss =  L.SoftmaxWithLoss(n.fc1, n.label)
    n.acc = L.Accuracy(n.fc1, n.label)
    return n.to_proto()
Code example #5
def caffenet(batch_size=256, include_acc=False, train=True, learn_all=True):

    subset = 'train' if train else 'test'
    source = caffe_root + 'data/flickr_style/%s.txt' % subset
    transform_param = dict(mirror=train, crop_size=227,
        mean_file=caffe_root + 'data/ilsvrc12/imagenet_mean.binaryproto')
    
    n = caffe.NetSpec()
    n.style_data, n.style_label = L.ImageData(
        transform_param=transform_param, source=source,
        batch_size=batch_size, new_height=256, new_width=256, ntop=2)

    param = learned_param if learn_all else frozen_param

    # the net itself
    n.conv1, n.relu1 = conv_relu(n.style_data, 11, 96, stride=4, param=param)
    n.pool1 = max_pool(n.relu1, 3, stride=2)
    n.norm1 = L.LRN(n.pool1, local_size=5, alpha=1e-4, beta=0.75)
    n.conv2, n.relu2 = conv_relu(n.norm1, 5, 256, pad=2, group=2, param=param)
    n.pool2 = max_pool(n.relu2, 3, stride=2)
    n.norm2 = L.LRN(n.pool2, local_size=5, alpha=1e-4, beta=0.75)
    n.conv3, n.relu3 = conv_relu(n.norm2, 3, 384, pad=1, param=param)
    n.conv4, n.relu4 = conv_relu(n.relu3, 3, 384, pad=1, group=2, param=param)
    n.conv5, n.relu5 = conv_relu(n.relu4, 3, 256, pad=1, group=2, param=param)
    n.pool5 = max_pool(n.relu5, 3, stride=2)
    n.fc6, n.relu6 = fc_relu(n.pool5, 4096, param=param)
    n.drop6 = L.Dropout(n.relu6, in_place=True)
    n.fc7, n.relu7 = fc_relu(n.drop6, 4096, param=param)
    n.drop7 = L.Dropout(n.relu7, in_place=True)
    n.fc8_style = L.InnerProduct(n.drop7, num_output=CLASS_NUM, param=learned_param)
    n.loss = L.SoftmaxWithLoss(n.fc8_style, n.style_label)

    if include_acc:
        n.acc = L.Accuracy(n.fc8_style, n.style_label)
    return n.to_proto()
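
This example (and several below) also relies on learned_param and frozen_param globals. In the Caffe fine-tuning notebook these are per-blob multiplier lists; a sketch, assuming that convention:

# Multipliers for layers that keep learning: weights decay, biases do not.
weight_param = dict(lr_mult=1, decay_mult=1)
bias_param = dict(lr_mult=2, decay_mult=0)
learned_param = [weight_param, bias_param]

# Zero learning-rate multipliers freeze a layer during fine-tuning.
frozen_param = [dict(lr_mult=0)] * 2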
Code example #6
def caffenet(lmdb, batch_size=256, include_acc=False):
    data, label = L.Data(source=lmdb, backend=P.Data.LMDB, batch_size=batch_size, ntop=2,
        transform_param=dict(crop_size=227, mean_value=[104, 117, 123], mirror=True))

    # the net itself
    conv1, relu1 = conv_relu(data, 11, 96, stride=4)
    pool1 = max_pool(relu1, 3, stride=2)
    norm1 = L.LRN(pool1, local_size=5, alpha=1e-4, beta=0.75)
    conv2, relu2 = conv_relu(norm1, 5, 256, pad=2, group=2)
    pool2 = max_pool(relu2, 3, stride=2)
    norm2 = L.LRN(pool2, local_size=5, alpha=1e-4, beta=0.75)
    conv3, relu3 = conv_relu(norm2, 3, 384, pad=1)
    conv4, relu4 = conv_relu(relu3, 3, 384, pad=1, group=2)
    conv5, relu5 = conv_relu(relu4, 3, 256, pad=1, group=2)
    pool5 = max_pool(relu5, 3, stride=2)
    fc6, relu6 = fc_relu(pool5, 4096)
    drop6 = L.Dropout(relu6, in_place=True)
    fc7, relu7 = fc_relu(drop6, 4096)
    drop7 = L.Dropout(relu7, in_place=True)
    fc8 = L.InnerProduct(drop7, num_output=1000)
    loss = L.SoftmaxWithLoss(fc8, label)

    if include_acc:
        acc = L.Accuracy(fc8, label)
        return to_proto(loss, acc)
    else:
        return to_proto(loss)
Code example #7
def caffenet(data,
             label=None,
             train=True,
             num_classes=1000,
             classifier_name='fc8',
             learn_all=False):
    """Returns a NetSpec specifying CaffeNet, following the original proto text
       specification (./models/bvlc_reference_caffenet/train_val.prototxt)."""
    n = caffe.NetSpec()
    n.data = data

    # Frozen or learned? If only the final fully connected layer is trained,
    # every other layer's parameters stay fixed, i.e. frozen.
    param = learned_param if learn_all else frozen_param

    n.conv1, n.relu1 = conv_relu(n.data, 11, 96, stride=4, param=param)
    n.pool1 = max_pool(n.relu1, 3, stride=2)
    n.norm1 = L.LRN(n.pool1, local_size=5, alpha=1e-4, beta=0.75)
    n.conv2, n.relu2 = conv_relu(n.norm1, 5, 256, pad=2, group=2, param=param)
    n.pool2 = max_pool(n.relu2, 3, stride=2)
    n.norm2 = L.LRN(n.pool2, local_size=5, alpha=1e-4, beta=0.75)
    n.conv3, n.relu3 = conv_relu(n.norm2, 3, 384, pad=1, param=param)
    n.conv4, n.relu4 = conv_relu(n.relu3, 3, 384, pad=1, group=2, param=param)
    n.conv5, n.relu5 = conv_relu(n.relu4, 3, 256, pad=1, group=2, param=param)
    n.pool5 = max_pool(n.relu5, 3, stride=2)
    n.fc6, n.relu6 = fc_relu(n.pool5, 4096, param=param)

    # Why this if/else? A prediction-only net needs no Dropout layer (there is
    # no backward pass), so L.Dropout is inserted only when building for training.
    if train:
        n.drop6 = fc7input = L.Dropout(n.relu6, in_place=True)
    else:
        fc7input = n.relu6
    n.fc7, n.relu7 = fc_relu(fc7input, 4096, param=param)
    if train:
        n.drop7 = fc8input = L.Dropout(n.relu7, in_place=True)
    else:
        fc8input = n.relu7

    # always learn fc8 (param=learned_param)
    fc8 = L.InnerProduct(fc8input, num_output=num_classes, param=learned_param)
    # give fc8 the name specified by argument `classifier_name`
    n.__setattr__(classifier_name, fc8)

    # For prediction-only nets, simply output the per-class probabilities.
    if not train:
        n.probs = L.Softmax(fc8)

    # When training, also emit the loss and accuracy on the labeled data.
    if label is not None:
        n.label = label
        n.loss = L.SoftmaxWithLoss(fc8, n.label)
        n.acc = L.Accuracy(fc8, n.label)
    # write the net to a temporary file and return its filename
    with tempfile.NamedTemporaryFile(mode='w', delete=False) as f:
        f.write(str(n.to_proto()))
        print(n.to_proto())
        return f.name
Code example #8
def generate_net():
    n = caffe.NetSpec()

    n.data = L.Data(source=TRAIN_LMDB_DATA_FILE,
                    backend=P.Data.LMDB,
                    batch_size=1,
                    ntop=1,
                    transform_param=dict(scale=1. / 255))
    n.label = L.Data(source=TRAIN_LMDB_LABEL_FILE,
                     backend=P.Data.LMDB,
                     batch_size=1,
                     ntop=1)

    # the base net
    n.conv1, n.relu1 = conv_relu(n.data, 11, 96, stride=4, pad=100)
    n.pool1 = max_pool(n.relu1, 3, stride=2)
    n.norm1 = L.LRN(n.pool1, local_size=5, alpha=1e-4, beta=0.75)
    n.conv2, n.relu2 = conv_relu(n.norm1, 5, 256, pad=2, group=2)
    n.pool2 = max_pool(n.relu2, 3, stride=2)
    n.norm2 = L.LRN(n.pool2, local_size=5, alpha=1e-4, beta=0.75)
    n.conv3, n.relu3 = conv_relu(n.norm2, 3, 384, pad=1)
    n.conv4, n.relu4 = conv_relu(n.relu3, 3, 384, pad=1, group=2)
    n.conv5, n.relu5 = conv_relu(n.relu4, 3, 256, pad=1, group=2)
    n.pool5 = max_pool(n.relu5, 3, stride=2)

    # fully conv
    n.fc6, n.relu6 = conv_relu(n.pool5, 6, 4096)
    n.drop6 = L.Dropout(n.relu6, dropout_ratio=0.5, in_place=True)
    n.fc7, n.relu7 = conv_relu(n.drop6, 1, 4096)
    n.drop7 = L.Dropout(n.relu7, dropout_ratio=0.5, in_place=True)

    # weight_filler=dict(type='gaussian', std=0.0001), bias_filler=dict(type='constant')
    n.score_fr_ = L.Convolution(
        n.drop7,
        num_output=2,
        kernel_size=1,
        pad=0,
        param=[dict(lr_mult=1, decay_mult=1),
               dict(lr_mult=2, decay_mult=0)])

    n.upscore_ = L.Deconvolution(n.score_fr_,
                                 convolution_param=dict(
                                     num_output=2,
                                     kernel_size=63,
                                     stride=32,
                                     group=2,
                                     bias_term=False,
                                     weight_filler=dict(type='bilinear')),
                                 param=[dict(lr_mult=0)])

    n.score = crop(n.upscore_, n.data)
    n.loss = L.SoftmaxWithLoss(n.score,
                               n.label,
                               loss_weight=1,
                               loss_param=dict(normalize=False,
                                               ignore_label=255))

    return n.to_proto()
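
This example and the next call a crop helper that is not shown. In the FCN reference implementation it comes from Caffe's coord_map module and computes the offset needed to align the upsampled score map with the data blob; assuming that codebase:

from caffe.coord_map import crop  # crop(n.upscore_, n.data) aligns the two blobs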
Code example #9
def fcn(split):
    n = caffe.NetSpec()
    pydata_params = dict(split=split,
                         mean=(104.00699, 116.66877, 122.67892),
                         seed=1337)
    if split == 'train':
        pydata_params['sbdd_dir'] = '../data/sbdd/dataset'
        pylayer = 'BDDSegDataLayer'
    else:
        pydata_params['bdd_dir'] = '/dl/data/bdd100k/seg'
        pylayer = 'BDDSegDataLayer'
    n.data, n.label = L.Python(module='voc_layers',
                               layer=pylayer,
                               ntop=2,
                               param_str=str(pydata_params))

    # the base net
    n.conv1, n.relu1 = conv_relu(n.data, 11, 96, stride=4, pad=100)
    n.pool1 = max_pool(n.relu1, 3, stride=2)
    n.norm1 = L.LRN(n.pool1, local_size=5, alpha=1e-4, beta=0.75)
    n.conv2, n.relu2 = conv_relu(n.norm1, 5, 256, pad=2, group=2)
    n.pool2 = max_pool(n.relu2, 3, stride=2)
    n.norm2 = L.LRN(n.pool2, local_size=5, alpha=1e-4, beta=0.75)
    n.conv3, n.relu3 = conv_relu(n.norm2, 3, 384, pad=1)
    n.conv4, n.relu4 = conv_relu(n.relu3, 3, 384, pad=1, group=2)
    n.conv5, n.relu5 = conv_relu(n.relu4, 3, 256, pad=1, group=2)
    n.pool5 = max_pool(n.relu5, 3, stride=2)

    # fully conv
    n.fc6, n.relu6 = conv_relu(n.pool5, 6, 4096)
    n.drop6 = L.Dropout(n.relu6, dropout_ratio=0.5, in_place=True)
    n.fc7, n.relu7 = conv_relu(n.drop6, 1, 4096)
    n.drop7 = L.Dropout(n.relu7, dropout_ratio=0.5, in_place=True)

    n.score_fr = L.Convolution(
        n.drop7,
        num_output=21,
        kernel_size=1,
        pad=0,
        param=[dict(lr_mult=1, decay_mult=1),
               dict(lr_mult=2, decay_mult=0)])

    #  upscore
    n.upscore = L.Deconvolution(n.score_fr,
                                convolution_param=dict(num_output=21,
                                                       kernel_size=63,
                                                       stride=32,
                                                       bias_term=False),
                                param=[dict(lr_mult=0)])
    n.score = crop(n.upscore, n.data)
    n.loss = L.SoftmaxWithLoss(n.score,
                               n.label,
                               loss_param=dict(normalize=True,
                                               ignore_label=255))

    return n.to_proto()
Code example #10
def caffenet_body(net, data, post, is_train):
    # the net itself
    net['conv1' + post], net['relu1' + post] = conv_relu(net[data],
                                                         11,
                                                         96,
                                                         stride=4,
                                                         is_train=is_train)
    net['pool1' + post] = max_pool(net['relu1' + post], 3, stride=2)
    net['norm1' + post] = L.LRN(net['pool1' + post],
                                local_size=5,
                                alpha=1e-4,
                                beta=0.75,
                                engine=P.LRN.CAFFE)
    net['conv2' + post], net['relu2' + post] = conv_relu(net['norm1' + post],
                                                         5,
                                                         256,
                                                         pad=2,
                                                         group=2,
                                                         is_train=is_train)
    net['pool2' + post] = max_pool(net['relu2' + post], 3, stride=2)
    net['norm2' + post] = L.LRN(net['pool2' + post],
                                local_size=5,
                                alpha=1e-4,
                                beta=0.75,
                                engine=P.LRN.CAFFE)
    net['conv3' + post], net['relu3' + post] = conv_relu(net['norm2' + post],
                                                         3,
                                                         384,
                                                         pad=1,
                                                         is_train=is_train)
    net['conv4' + post], net['relu4' + post] = conv_relu(net['relu3' + post],
                                                         3,
                                                         384,
                                                         pad=1,
                                                         group=2,
                                                         is_train=is_train)
    net['conv5' + post], net['relu5' + post] = conv_relu(net['relu4' + post],
                                                         3,
                                                         256,
                                                         pad=1,
                                                         group=2,
                                                         is_train=is_train)
    net['pool5' + post] = max_pool(net['relu5' + post], 3, stride=2)
    net['fc6' + post], net['relu6' + post] = fc_relu(net['pool5' + post],
                                                     4096,
                                                     is_train=is_train)
    net['drop6' + post] = L.Dropout(net['relu6' + post], in_place=True)
    net['fc7' + post], net['relu7' + post] = fc_relu(net['drop6' + post],
                                                     4096,
                                                     is_train=is_train)
    net['drop7' + post] = L.Dropout(net['relu7' + post], in_place=True)
    #n.score = L.InnerProduct(n.drop7, num_output=20, weight_filler=dict(type='gaussian', std=0.01))
    #n.loss = L.SigmoidCrossEntropyLoss(n.score, n.label)
    final = 'drop7' + post
    return net, final
Code example #11
File: create_net.py Project: soltrinox/romans_stack
def cnn(split):
    n = caffe.NetSpec()
    pydata_params = dict(dataset_dir='/home/kevin/dataset/processed_data', variable='depth_map',
                         split=split, mean=(2,), seed=1337, batch_size=1, img_size=(250, 250))
    if split == 'deploy':
        n.img = L.Input(name='input', shape=[dict(dim=[1, 1, 250, 250])])
    else:
        # 'train' and other splits use the same python data layer here
        pydata_params['dtype'] = 'object'
        pylayer = 'ModelNetDataLayer'

        n.img, n.label = L.Python(module='data_layers.model_net_layer', layer=pylayer,
                                  ntop=2, param_str=str(pydata_params))

    # the base net
    n.conv1, n.relu1 = conv_relu("conv1", n.img, 96, ks=11, stride=4, pad=0)
    n.pool1 = max_pool(n.relu1, ks=3)
    n.norm1 = L.LRN(n.pool1, lrn_param=dict(local_size=5, alpha=0.0005, beta=0.75, k=2))
    # n.bn1 = L.BatchNorm(n.pool1, param=[dict(lr_mult=0),dict(lr_mult=0),dict(lr_mult=0)], batch_norm_param=dict(use_global_stats=True))

    n.conv2, n.relu2 = conv_relu("conv2", n.norm1, 256, ks=5, pad=2, group=2)
    n.pool2 = max_pool(n.relu2, ks=3)
    n.norm2 = L.LRN(n.pool2, lrn_param=dict(local_size=5, alpha=0.0005, beta=0.75, k=2))
    # n.bn2 = L.BatchNorm(n.pool2, param=[dict(lr_mult=0),dict(lr_mult=0),dict(lr_mult=0)], batch_norm_param=dict(use_global_stats=True))


    n.conv3, n.relu3 = conv_relu("conv3", n.norm2, 384, ks=3, pad=1, group=2)

    n.conv4, n.relu4 = conv_relu("conv4", n.relu3, 256, ks=3, pad=1, group=2)
    
    n.pool5 = max_pool(n.relu4, ks=3)

    n.fc6, n.relu6 = fc_relu(n.pool5, 4096, lr1=1, lr2=2)   
    n.drop6 = L.Dropout(n.relu6, dropout_ratio=0.5, in_place=True)
    n.fc7, n.relu7 = fc_relu(n.drop6 , 4096, lr1=1, lr2=2)
    n.drop7 = L.Dropout(n.relu7, dropout_ratio=0.5, in_place=True)
    n.fc8 = fc(n.drop7, 40, lr1=1, lr2=2)

    if split != 'deploy':
        n.accuracy = L.Accuracy(n.fc8, n.label)
        # n.loss = L.SoftmaxWithLoss(n.fc8, n.label)
        n.loss = L.Python(n.fc8, n.label, loss_weight=1,
                          module='nn_layers.max_softmax_loss_layer', layer='MaxSoftmaxLossLayer')


    # n.display = L.Scale(n.corr, param=[dict(lr_mult=0)], filler=dict(type='constant',value=1.0))
    # n.fc9_bn = L.BatchNorm(n.relu9, param=[dict(lr_mult=0),dict(lr_mult=0),dict(lr_mult=0)], batch_norm_param=dict(use_global_stats=True))

    return n.to_proto()
Code example #12
def ZFNetBody(net, from_layer, for_training=True):
  net.conv1 = L.Convolution(net[from_layer], kernel_size=k_conv1, stride=s_conv1, num_output=d_conv1, pad=p_conv1, 
                            bias_term=True, weight_filler=dict(type='gaussian', std=0.01), bias_filler=dict(type='constant', value=0),
                            param=[dict(lr_mult=1,decay_mult=1), dict(lr_mult=2,decay_mult=0)])
  net.relu1 = L.ReLU(net.conv1, in_place=True)
  net.pool1 = L.Pooling(net.relu1, pool=P.Pooling.MAX, kernel_size=k_pool1, stride=s_pool1)
  net.norm1 = L.LRN(net.pool1, lrn_param=dict(local_size=local_size_norm1, alpha=alpha_norm1, beta=beta_norm1))
  
  net.conv2 = L.Convolution(net.norm1, kernel_size=k_conv2, stride=s_conv2, num_output=d_conv2, #pad=p_conv2, 
                            bias_term=True, weight_filler=dict(type='gaussian', std=0.01), bias_filler=dict(type='constant', value=0),
                            param=[dict(lr_mult=1,decay_mult=1), dict(lr_mult=2,decay_mult=0)])
  net.relu2 = L.ReLU(net.conv2, in_place=True)
  net.pool2 = L.Pooling(net.relu2, pool=P.Pooling.MAX, kernel_size=k_pool2, stride=s_pool2)
  net.norm2 = L.LRN(net.pool2, lrn_param=dict(local_size=local_size_norm2, alpha=alpha_norm2, beta=beta_norm2))
  
  net.conv3 = L.Convolution(net.norm2, kernel_size=k_conv3, stride=s_conv3, num_output=d_conv3, pad=p_conv3, 
                            bias_term=True, weight_filler=dict(type='gaussian', std=0.01), bias_filler=dict(type='constant', value=0),
                            param=[dict(lr_mult=1,decay_mult=1), dict(lr_mult=2,decay_mult=0)])
  net.relu3 = L.ReLU(net.conv3, in_place=True)
  
  net.conv4 = L.Convolution(net.relu3, kernel_size=k_conv4, stride=s_conv4, num_output=d_conv4, pad=p_conv4, 
                            bias_term=True, weight_filler=dict(type='gaussian', std=0.01), bias_filler=dict(type='constant', value=0),
                            param=[dict(lr_mult=1,decay_mult=1), dict(lr_mult=2,decay_mult=0)])
  net.relu4 = L.ReLU(net.conv4, in_place=True)
  
  net.conv5 = L.Convolution(net.relu4, kernel_size=k_conv5, stride=s_conv5, num_output=d_conv5, pad=p_conv5, 
                            bias_term=True, weight_filler=dict(type='gaussian', std=0.01), bias_filler=dict(type='constant', value=0),
                            param=[dict(lr_mult=1,decay_mult=1), dict(lr_mult=2,decay_mult=0)])
  net.relu5 = L.ReLU(net.conv5, in_place=True)
  net.pool5 = L.Pooling(net.relu5, pool=P.Pooling.MAX, kernel_size=k_pool5, stride=s_pool5)
  
  net.fc6 = L.InnerProduct(net.pool5, num_output=k_ip6,
                           weight_filler=dict(type='gaussian', std=0.01), bias_filler=dict(type='constant', value=0),
                           param=[dict(lr_mult=1,decay_mult=1), dict(lr_mult=2,decay_mult=0)])
  net.relu6 = L.ReLU(net.fc6, in_place=True)
  net.drop6 = L.Dropout(net.relu6, dropout_param=dict(dropout_ratio=r_drop6), in_place=True)
  
  net.fc7 = L.InnerProduct(net.fc6, num_output=k_ip7,
                           weight_filler=dict(type='gaussian', std=0.01), bias_filler=dict(type='constant', value=0),
                           param=[dict(lr_mult=1,decay_mult=1), dict(lr_mult=2,decay_mult=0)])
  net.relu7 = L.ReLU(net.fc7, in_place=True)
  net.drop7 = L.Dropout(net.relu7, dropout_param=dict(dropout_ratio=r_drop7), in_place=True)
  
  net.fc8 = L.InnerProduct(net.fc7, num_output=k_ip8,
                           weight_filler=dict(type='gaussian', std=0.01), bias_filler=dict(type='constant', value=0),
                           param=[dict(lr_mult=1,decay_mult=1), dict(lr_mult=2,decay_mult=0)])
  if not for_training:
    net.acc = L.Accuracy(net.fc8, net.label, include=dict(phase=caffe_pb2.Phase.Value('TEST')))
    net.acc5 = L.Accuracy(net.fc8, net.label, include=dict(phase=caffe_pb2.Phase.Value('TEST')), accuracy_param=dict(top_k=5))
  
  net.loss = L.SoftmaxWithLoss(net.fc8, net.label)
  
  return net
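
ZFNetBody reads its hyper-parameters from module-level constants that are not part of this excerpt. A hypothetical assignment consistent with the ZFNet architecture (Zeiler & Fergus, 2013); the actual values live elsewhere in the source file:

# Assumed ZFNet-style constants (kernel, stride, depth, pad per layer).
k_conv1, s_conv1, d_conv1, p_conv1 = 7, 2, 96, 1
k_pool1, s_pool1 = 3, 2
local_size_norm1, alpha_norm1, beta_norm1 = 5, 1e-4, 0.75
k_conv2, s_conv2, d_conv2 = 5, 2, 256
k_pool2, s_pool2 = 3, 2
local_size_norm2, alpha_norm2, beta_norm2 = 5, 1e-4, 0.75
k_conv3, s_conv3, d_conv3, p_conv3 = 3, 1, 384, 1
k_conv4, s_conv4, d_conv4, p_conv4 = 3, 1, 384, 1
k_conv5, s_conv5, d_conv5, p_conv5 = 3, 1, 256, 1
k_pool5, s_pool5 = 3, 2
k_ip6 = k_ip7 = 4096
k_ip8 = 1000
r_drop6 = r_drop7 = 0.5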
Code example #13
def caffenet(data,
             label=None,
             train=True,
             num_classes=1000,
             classifier_name='fc8',
             learn_all=False,
             deploy=False):
    """Returns a NetSpec specifying CaffeNet, following the original proto text
       specification (./models/bvlc_reference_caffenet/train_val.prototxt)."""
    n = caffe.NetSpec()
    n.data = data
    param = learned_param if learn_all else frozen_param
    n.conv1, n.relu1 = conv_relu(n.data, 11, 96, stride=4, param=param)
    n.pool1 = max_pool(n.relu1, 3, stride=2)
    n.norm1 = L.LRN(n.pool1, local_size=5, alpha=1e-4, beta=0.75)
    n.conv2, n.relu2 = conv_relu(n.norm1, 5, 256, pad=2, group=2, param=param)
    n.pool2 = max_pool(n.relu2, 3, stride=2)
    n.norm2 = L.LRN(n.pool2, local_size=5, alpha=1e-4, beta=0.75)
    n.conv3, n.relu3 = conv_relu(n.norm2, 3, 384, pad=1, param=param)
    n.conv4, n.relu4 = conv_relu(n.relu3, 3, 384, pad=1, group=2, param=param)
    n.conv5, n.relu5 = conv_relu(n.relu4, 3, 256, pad=1, group=2, param=param)
    n.pool5 = max_pool(n.relu5, 3, stride=2)
    n.fc6, n.relu6 = fc_relu(n.pool5, 4096, param=param)
    if train:
        n.drop6 = fc7input = L.Dropout(n.relu6, in_place=True)
    else:
        fc7input = n.relu6
    n.fc7, n.relu7 = fc_relu(fc7input, 4096, param=param)
    if train:
        n.drop7 = fc8input = L.Dropout(n.relu7, in_place=True)
    else:
        fc8input = n.relu7
    # always learn fc8 (param=learned_param)
    fc8 = L.InnerProduct(fc8input, num_output=num_classes, param=learned_param)
    # give fc8 the name specified by argument `classifier_name`
    n.__setattr__(classifier_name, fc8)
    if not train:
        n.probs = L.Softmax(fc8)
    if label is not None:
        n.label = label
        n.loss = L.SoftmaxWithLoss(fc8, n.label)
        n.acc = L.Accuracy(fc8, n.label)
    if train:
        protxt_file = train_prototxt
    else:
        protxt_file = val_prototxt
    if deploy:
        protxt_file = deploy_style_recognition_net_filename
    with open(protxt_file, 'w') as f:
        f.write(str(n.to_proto()))
Code example #14
def define_network(args, imageFile, vidIds, radarFiles, training=False):
  net = caffe.NetSpec()
  
  # Setting up data layer
  transformParam = dict(mirror=training, mean_value=args.mean)
  pydataParams = dict(radar_files=radarFiles, videos=vidIds, batch_size=args.batchSize)

  net.data, net.label = L.ImageData(transform_param=transformParam, source=imageFile,
                                    shuffle=False, batch_size=args.batchSize, ntop=2)
  if args.expType != 'image':
    net.radar = L.Python(module='radarDataLayer', layer='RadarDataLayer', param_str=str(pydataParams), ntop=1)
 
  if args.expType == "joint" or args.expType == "image":
    net.conv1, net.relu1 = conv_relu(net.data, 11, 96, stride=4)
    net.pool1 = max_pool(net.relu1, 3, stride=2)
    net.norm1 = L.LRN(net.pool1, local_size=5, alpha=1e-4, beta=0.75)

    net.conv2, net.relu2 = conv_relu(net.norm1, 5, 256, pad=2, group=2)
    net.pool2 = max_pool(net.relu2, 3, stride=2)
    net.norm2 = L.LRN(net.pool2, local_size=5, alpha=1e-4, beta=0.75)

    net.conv3, net.relu3 = conv_relu(net.norm2, 3, 384, pad=1)
    net.conv4, net.relu4 = conv_relu(net.relu3, 3, 384, pad=1, group=2)
    net.conv5, net.relu5 = conv_relu(net.relu4, 3, 256, pad=1, group=2)
    net.pool5 = max_pool(net.relu5, 3, stride=2)

    net.fc6_new, net.relu6_new = fc_relu(net.pool5, 4096)
    net.drop6 = L.Dropout(net.relu6_new, in_place=True)

    net.fc7_new = L.InnerProduct(net.drop6, num_output=4096, param=learned_param, weight_filler=fc_filler)
    
    if args.expType == "joint":
      net.concat = L.Concat(net.fc7_new, net.radar)
      net.relu7 = L.ReLU(net.concat, in_place=True)
    else:
      net.relu7 = L.ReLU(net.fc7_new, in_place=True)

    net.drop7 = L.Dropout(net.relu7, in_place=True)
    net.final = L.InnerProduct(net.drop7, num_output=args.num_out, param=learned_param, weight_filler=fc_filler)

  elif args.expType == "radar":
    net.silence = L.Silence(net.data, ntop=0)
    net.fc7_new = L.InnerProduct(net.radar, num_output=1024, param=learned_param, weight_filler=fc_filler)
    net.relu7 = L.ReLU(net.fc7_new, in_place=True)
    net.drop7 = L.Dropout(net.relu7, in_place=True)
    net.final = L.InnerProduct(net.drop7, num_output=args.num_out, param=learned_param, weight_filler=fc_filler)

  net.loss = L.SoftmaxWithLoss(net.final, net.label)
  net.acc = L.Accuracy(net.final, net.label)
  return net.to_proto()
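
Besides learned_param (sketched after example #5), this snippet assumes an fc_filler weight filler. A plausible definition, following the Caffe fine-tuning example (the std value is a guess):

fc_filler = dict(type='gaussian', std=0.005)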
Code example #15
def conv1_to_5(n, param):
    n.conv1, n.relu1 = conv_relu(n.data, ks=7, nout=96, stride=2, pad=3, param=param)
    n.norm1 = L.LRN(n.conv1, local_size=3, alpha=0.00005, beta=0.75, norm_region=LRN_NORMREGION_WITHIN_CHANNEL,
                   engine=LRN_ENGINE_CAFFE)

    n.pool1 = max_pool(n.norm1, 3, stride=2)

    n.conv2, n.relu2 = conv_relu(n.pool1, 5, 256, stride=2, pad=2, param=param)

    n.norm2 = L.LRN(n.conv2, local_size=3, alpha=0.00005, beta=0.75, norm_region=LRN_NORMREGION_WITHIN_CHANNEL,
                   engine=LRN_ENGINE_CAFFE)
    n.pool2 = max_pool(n.norm2, ks=3, stride=2, pad=1)
    n.conv3, n.relu3 = conv_relu(n.pool2, ks=3, nout=384, stride=1, pad=1, param=param)
    n.conv4, n.relu4 = conv_relu(n.conv3, ks=3, nout=384, stride=1, pad=1, param=param)
    n.conv5, n.relu5 = conv_relu(n.conv4, ks=3, nout=256, stride=1, pad=1, param=param)
Code example #16
File: generateResNet.py Project: xuxu2014/caffe-1
def conv_bn_layers(bottom, num_filter, param, weight_filler, bias_filler, kernel=3, stride=1, pad=0):
    conv = conv_layer(bottom, num_filter=num_filter, kernel=kernel, stride=stride, pad=pad,
                      param=param, weight_filler=weight_filler, bias_filler=bias_filler)
    # bn = L.BatchNorm(conv, in_place=True, param=[{"lr_mult": 0}, {"lr_mult": 0}, {"lr_mult": 0}])  # use_global_stats=False
    bn = L.BN(conv, param=[dict(lr_mult=1), dict(lr_mult=1)],
              scale_filler=dict(type="constant", value=1),
              shift_filler=dict(type="constant", value=0))
    lrn = L.LRN(bn)
    return lrn
Code example #17
    def assemble_net(self, lmdb, mean_file, proto_filename, batch_size, crop_size,img_channels, phase):
        layer_dict = {}
        n = caffe.NetSpec()

        if phase in ['Train', 'TRAIN','train']:
            dropout_flag = True
            loss_layer_flag = True
            prob_layer_flag = False
            acc_layer_flag = True

        if phase in ['test', 'TEST', 'Test']:
            dropout_flag = False
            loss_layer_flag = True
            prob_layer_flag = False
            acc_layer_flag = True

        if phase in ['deploy', 'DEPLOY', 'Deploy']:
            dropout_flag = False
            loss_layer_flag = False
            prob_layer_flag = True
            acc_layer_flag = False

        if phase in ['deploy', 'DEPLOY', 'Deploy']:
            n.data = L.Input(input_param= {'shape':{'dim':[1,img_channels,crop_size,crop_size]}})
        else:
            n.data, n.label = L.Data(transform_param={'crop_size': crop_size, 'mean_file':mean_file}, data_param={'source':lmdb,'batch_size':batch_size, 'backend': P.Data.LMDB},
                                        ntop=2)

        for i, layer_name in enumerate(self.topology):
            #print layer_name
            if 'conv' in layer_name:
                if i==0:
                    n.conv = self.conv2d(n.data, layer_name)
                else:
                    n.conv = self.conv2d(layer_dict[str(i-1)], layer_name)
                layer_dict[str(i)] = n.conv

            elif 'pool' in layer_name:
                pool_params = {'pool': 0, 'kernel_size': 3, 'stride': 2}
                n.pool = L.Pooling(layer_dict[str(i - 1)], pooling_param=pool_params, name=layer_name)
                layer_dict[str(i)] = n.pool

            elif 'fc' in layer_name or 'output' in layer_name:
                n.fc = self.fc(layer_dict[str(i - 1)], layer_name, dropout_flag)
                layer_dict[str(i)] = n.fc

            elif 'lrn' in layer_name:
                n.norm = L.LRN(layer_dict[str(i - 1)], local_size=5, alpha=1e-4, beta=0.75, name=layer_name)
                layer_dict[str(i)] = n.norm

        if loss_layer_flag:
            n.loss = L.SoftmaxWithLoss(layer_dict[str(i)], n.label, name='loss')
        if prob_layer_flag:
            n.prob = L.Softmax(layer_dict[str(i)], name='prob')
        if acc_layer_flag:
            n.accuracy = L.Accuracy(layer_dict[str(i)], n.label, name='accuracy')

        print(layer_dict)
        with open(proto_filename, 'w') as f:
            f.write(str(n.to_proto()))
Code example #18
def convLayer(prev, lrn=False, param_name=None, bn=False, **kwargs):
    if param_name:
        name1 = param_name + '_kernels'
        name2 = param_name + '_bias'
        conv = L.Convolution(
            prev,
            param=[dict(lr_mult=1, name=name1),
                   dict(lr_mult=2, name=name2)],
            weight_filler=dict(type='msra'),
            **kwargs)
    else:
        conv = L.Convolution(prev,
                             param=[dict(lr_mult=1),
                                    dict(lr_mult=2)],
                             weight_filler=dict(type='msra'),
                             **kwargs)
    if bn:
        bn = L.BatchNorm(conv)
        relu = L.ReLU(bn, in_place=True)
    else:
        relu = L.ReLU(conv, in_place=True)
    if lrn:
        # optional Local Response Normalization
        relu = L.LRN(relu,
                     lrn_param={
                         'local_size': min(kwargs['num_output'] // 3, 5),
                         'alpha': 0.0001,
                         'beta': 0.75
                     })
    return relu
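
A hypothetical call, to show how the keyword arguments flow through to L.Convolution (all names and shapes here are illustrative):

n = caffe.NetSpec()
n.data = L.Input(shape=dict(dim=[1, 3, 227, 227]))
# num_output=96 also sets the LRN local_size via min(96 // 3, 5) == 5.
n.relu1 = convLayer(n.data, lrn=True, param_name='conv1',
                    num_output=96, kernel_size=11, stride=4)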
Code example #19
def lenet(lmdb, batch_size):
    # our version of LeNet: a series of linear and simple nonlinear transformations
    n = caffe.NetSpec()
    n.data, n.label = L.Data(batch_size=batch_size,
                             backend=P.Data.LMDB,
                             source=lmdb,
                             transform_param=dict(scale=1. / 255),
                             ntop=2)
    n.conv1 = L.Convolution(n.data,
                            kernel_size=5,
                            num_output=32,
                            weight_filler=dict(type='xavier'))
    n.pool1 = L.Pooling(n.conv1, kernel_size=3, stride=2, pool=P.Pooling.MAX)
    n.relu_pool1 = L.ReLU(n.pool1)  # output unused: norm1 reads pool1 directly
    n.norm1 = L.LRN(n.pool1, local_size=3, alpha=.00005, beta=.75)
    n.conv2 = L.Convolution(n.norm1,
                            kernel_size=3,
                            num_output=64,
                            weight_filler=dict(type='xavier'))
    n.pool2 = L.Pooling(n.conv2, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    n.conv3 = L.Convolution(n.pool1,
                            kernel_size=3,
                            num_output=128,
                            weight_filler=dict(type='xavier'))
    n.pool3 = L.Pooling(n.conv2, kernel_size=2, stride=2, pool=P.Pooling.MAX)
    n.ip1 = L.InnerProduct(n.pool2,
                           num_output=128 * 3 * 3,
                           weight_filler=dict(type='xavier'))
    n.relu1 = L.ReLU(n.ip1, in_place=True)
    n.ip2 = L.InnerProduct(n.relu1,
                           num_output=10,
                           weight_filler=dict(type='xavier'))
    n.loss = L.SoftmaxWithLoss(n.ip2, n.label)
    return n.to_proto()
Code example #20
File: cnet.py Project: griesen/Texture-Synthesis
def prototxt_rep(r):
    n = caffe.NetSpec()
    layers = r['model_state']['layers']

    lays = {}
    size = int(math.sqrt(layers['data']['outputs'] / 3.))
    n.data, n.label = L.DummyData(shape=[dict(dim=[1, 3, size, size]),
                                         dict(dim=[1, 1, 1, 1])])
    lays['data'] = n.data
    lays['label'] = n.label
    for lname in layer_names:
        layer0 = layers[lname]
        input_name = layer0['inputs'][0]
        if input_name + '_neuron' in lays:
            input_name = input_name +'_neuron'
        input = lays[input_name]
        ltype = layer0['type']
        if ltype == 'conv':
            lay = L.Convolution(input, 
                               kernel_size = layer0['filterSize'][0],
                               pad = -layer0['padding'][0],
                               stride = layer0['stride'][0],
                               num_output = layer0['filters'])
        elif ltype == 'pool':
            ptype = P.Pooling.MAX if layer0['pool'] == 'max' else P.Pooling.AVE
            lay = L.Pooling(input,
                            kernel_size = layer0['sizeX'],
                            stride = layer0['stride'],
                            pool = ptype)
        elif ltype == 'fc':
            lay = L.InnerProduct(input, 
                                 num_output = layer0['outputs'])
        elif ltype == 'neuron':
            ntype = layer0['neuron']
            if ntype == 'relu':
                cname = 'ReLU'
            lay = getattr(L, cname)(input, in_place=True)
        elif ltype == 'cmrnorm':
            lay = L.LRN(input,
                        local_size = layer0['size'],
                        alpha = layer0['scale'],
                        beta = layer0['pow'])
        elif ltype == 'dropout2':
            lay = L.Dropout(input,
                            dropout_ratio = 0.5)
        else:
            raise ValueError("Layer type %s not recognized" % ltype)

        setattr(n, lname, lay)
        lays[lname] = lay
        if layer0.get('neuron'):
            ntype = layer0['neuron']
            nname = lname + '_neuron' 
            if ntype == 'relu':
                cname = 'ReLU'
            nlay = getattr(L, cname)(lay, in_place=True)
            setattr(n, nname, nlay)
            lays[nname] = nlay
    
    return n
Code example #21
def minivggnet(data,
               labels=None,
               train=False,
               cudnn=False,
               param=learned_param,
               num_classes=100,
               with_labels=True):
    """
    Returns a protobuf text file specifying a variant of VGG
    """
    n = caffe.NetSpec()
    n.data = data
    conv_kwargs = dict(param=param, train=train, cudnn=cudnn)
    n.conv1, n.relu1 = conv_relu(n.data, 7, 96, stride=2, **conv_kwargs)
    n.norm1 = L.LRN(n.relu1, local_size=5, alpha=0.0005, beta=0.75, k=2)
    n.pool1 = max_pool(n.norm1, 3, stride=3, train=train, cudnn=cudnn)
    n.conv2, n.relu2 = conv_relu(n.pool1,
                                 5,
                                 256,
                                 pad=1,
                                 stride=2,
                                 group=2,
                                 **conv_kwargs)
    n.pool2 = max_pool(n.relu2, 2, stride=2, train=train, cudnn=cudnn)
    n.conv3, n.relu3 = conv_relu(n.pool2, 3, 512, pad=1, **conv_kwargs)
    n.conv4, n.relu4 = conv_relu(n.relu3,
                                 3,
                                 512,
                                 pad=1,
                                 group=2,
                                 **conv_kwargs)
    n.conv5, n.relu5 = conv_relu(n.relu4,
                                 3,
                                 512,
                                 pad=1,
                                 group=2,
                                 **conv_kwargs)
    n.pool5 = max_pool(n.relu5, 3, stride=3, train=train, cudnn=cudnn)
    n.fc6, n.relu6 = fc_relu(n.pool5, 1024, param=param)
    n.drop6 = L.Dropout(n.relu6, in_place=True)
    n.fc7, n.relu7 = fc_relu(n.drop6, 1024, param=param)
    n.drop7 = L.Dropout(n.relu7, in_place=True)
    preds = n.fc8 = L.InnerProduct(n.drop7,
                                   num_output=num_classes,
                                   param=param)
    if not train:
        # Compute the per-label probabilities at test/inference time.
        preds = n.probs = L.Softmax(n.fc8)
    if with_labels:
        n.label = labels
        n.loss = L.SoftmaxWithLoss(n.fc8, n.label)
        n.accuracy_at_1 = L.Accuracy(preds, n.label)
        n.accuracy_at_5 = L.Accuracy(preds,
                                     n.label,
                                     accuracy_param=dict(top_k=5))
    else:
        n.ignored_label = labels
        n.silence_label = L.Silence(n.ignored_label, ntop=0)
    return to_tempfile(str(n.to_proto()))
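
to_tempfile is not shown above; presumably it writes the prototxt string to a named temporary file and returns its path. A minimal sketch under that assumption:

import tempfile

def to_tempfile(file_content):
    # The file must outlive this call so caffe.Net can read it later.
    with tempfile.NamedTemporaryFile(mode='w', suffix='.prototxt',
                                     delete=False) as f:
        f.write(file_content)
        return f.name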
Code example #22
def LRN(bottom, name='lrn', ls=5, a=0.0001, b=0.75):
    return L.LRN(bottom,
                 name=name,
                 lrn_param={
                     'local_size': ls,
                     'alpha': a,
                     'beta': b
                 })
Code example #23
File: test_forward.py Project: lemonqueen/TVM
 def net():
     n = caffe.NetSpec()
     n.data = L.Input(input_param=dict(shape=dict(dim=data_shape)))
     n.dataout = L.LRN(n.data,
                       local_size=_local_size,
                       alpha=_alpha,
                       beta=_beta)
     return n.to_proto()
Code example #24
def caffenet(data,
             label=None,
             train=True,
             num_classes=1000,
             classifier_name='fc8',
             learn_all=False):
    #     Returns a NetSpec specifying CaffeNet
    n = caffe.NetSpec()
    n.data = data
    param = learned_param if learn_all else frozen_param
    n.conv1, n.relu1 = conv_relu(n.data, 11, 96, stride=4, param=param)
    n.pool1 = max_pool(n.relu1, 3, stride=2)
    n.norm1 = L.LRN(n.pool1, local_size=5, alpha=1e-4, beta=0.75)
    n.conv2, n.relu2 = conv_relu(n.norm1, 5, 256, pad=2, group=2, param=param)
    n.pool2 = max_pool(n.relu2, 3, stride=2)
    n.norm2 = L.LRN(n.pool2, local_size=5, alpha=1e-4, beta=0.75)
    n.conv3, n.relu3 = conv_relu(n.norm2, 3, 384, pad=1, param=param)
    n.conv4, n.relu4 = conv_relu(n.relu3, 3, 384, pad=1, group=2, param=param)
    n.conv5, n.relu5 = conv_relu(n.relu4, 3, 256, pad=1, group=2, param=param)
    n.pool5 = max_pool(n.relu5, 3, stride=2)
    n.fc6, n.relu6 = fc_relu(n.pool5, 1024, param=param)
    if train:
        n.drop6 = fc7input = L.Dropout(n.relu6, in_place=True)
    else:
        fc7input = n.relu6
    n.fc7, n.relu7 = fc_relu(fc7input, 1024, param=param)
    if train:
        n.drop7 = fc8input = L.Dropout(n.relu7, in_place=True)
    else:
        fc8input = n.relu7
    # always learn fc8 (param=learned_param)
    fc8 = L.InnerProduct(fc8input, num_output=num_classes, param=learned_param)
    # give fc8 the name specified by argument `classifier_name`
    n.__setattr__("fc6_m", n.fc6)
    n.__setattr__("fc7_m", n.fc7)
    n.__setattr__(classifier_name, fc8)
    if not train:
        n.probs = L.Softmax(fc8)
    if label is not None:
        n.label = label
        n.loss = L.SoftmaxWithLoss(fc8, n.label)
        n.acc = L.Accuracy(fc8, n.label)
    # write the net to a temporary file and return its filename
    with tempfile.NamedTemporaryFile(mode='w', delete=False) as f:
        f.write(str(n.to_proto()))
        return f.name
Code example #25
def test_lrn():
    # type: ()->caffe.NetSpec

    n = caffe.NetSpec()
    n.input1 = L.Input(shape=make_shape([10, 3, 64, 64]))
    n.lrn1 = L.LRN(n.input1)

    return n
Code example #26
def add_caffenet(net, num_labels):
    net.conv1, net.relu1 = conv_relu(net.data, 11, 96, stride=4)
    net.pool1 = max_pool(net.relu1, 3, stride=2)
    net.norm1 = L.LRN(net.pool1, local_size=5, alpha=1e-4, beta=0.75)
    net.conv2, net.relu2 = conv_relu(net.norm1, 5, 256, pad=2, group=2)
    net.pool2 = max_pool(net.relu2, 3, stride=2)
    net.norm2 = L.LRN(net.pool2, local_size=5, alpha=1e-4, beta=0.75)
    net.conv3, net.relu3 = conv_relu(net.norm2, 3, 384, pad=1)
    net.conv4, net.relu4 = conv_relu(net.relu3, 3, 384, pad=1, group=2)
    net.conv5, net.relu5 = conv_relu(net.relu4, 3, 256, pad=1, group=2)
    net.pool5 = max_pool(net.relu5, 3, stride=2)
    net.fc6, net.relu6 = fc_relu(net.pool5, 4096)
    net.drop6 = L.Dropout(net.relu6, in_place=True)
    net.fc7, net.relu7 = fc_relu(net.drop6, 4096)
    net.drop7 = L.Dropout(net.relu7, in_place=True)
    net.score = L.InnerProduct(net.drop7, num_output=num_labels)
    return net
Code example #27
File: layer_helpers.py Project: stoneyang/deep_share
def add_lrn(net, bottom, name, local_size, alpha, beta, k):
    """Add local response normalizaiton unit """
    net[name] = L.LRN(bottom,
                      local_size=local_size,
                      alpha=alpha,
                      beta=beta,
                      k=k,
                      in_place=True)
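
For reference, a hypothetical call reproducing the usual AlexNet norm1 settings (blob and layer names are illustrative):

add_lrn(net, net.pool1, 'norm1', local_size=5, alpha=1e-4, beta=0.75, k=1.0)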
Code example #28
def caffenet(data,
             label=None,
             train=True,
             num_classes=1000,
             classifier_name='fc8',
             learn_all=False):
    n = caffe.NetSpec()
    n.data = data
    param = learned_param if learn_all else frozen_param
    n.conv1, n.relu1 = conv_relu(n.data, 11, 96, stride=4, param=param)
    n.pool1 = max_pool(n.relu1, 3, stride=2)
    n.norm1 = L.LRN(n.pool1, local_size=5, alpha=1e-4, beta=0.75)
    n.conv2, n.relu2 = conv_relu(n.norm1, 5, 256, pad=2, group=2, param=param)
    n.pool2 = max_pool(n.relu2, 3, stride=2)
    n.norm2 = L.LRN(n.pool2, local_size=5, alpha=1e-4, beta=0.75)
    n.conv3, n.relu3 = conv_relu(n.norm2, 3, 384, pad=1, param=param)
    n.conv4, n.relu4 = conv_relu(n.relu3, 3, 384, pad=1, group=2, param=param)
    n.conv5, n.relu5 = conv_relu(n.relu4, 3, 256, pad=1, group=2, param=param)
    n.pool5 = max_pool(n.relu5, 3, stride=2)
    n.fc6, n.relu6 = fc_relu(n.pool5, 4096, param=param)
    if train:
        n.drop6 = fc7input = L.Dropout(n.relu6, in_place=True)
    else:
        fc7input = n.relu6
    n.fc7, n.relu7 = fc_relu(fc7input, 4096, param=param)
    if train:
        n.drop7 = fc8input = L.Dropout(n.relu7, in_place=True)
    else:
        fc8input = n.relu7
    # always learn fc8 (param=learned_param)
    fc8 = L.InnerProduct(fc8input, num_output=num_classes, param=learned_param)
    # give fc8 the name specified by argument `classifier_name`
    n.__setattr__(classifier_name, fc8)
    if not train:
        n.probs = L.Softmax(fc8)
    if label is not None:
        n.label = label
        n.loss = L.SoftmaxWithLoss(fc8, n.label)
        n.acc = L.Accuracy(fc8, n.label)

    mode = 'train' if train else 'test'
    filename = path_savemodel + num_fold + '_' + typeNet + '_' + mode + '.prototxt'
    with open(filename, 'w') as f:
        f.write(str(n.to_proto()))
        return filename
Code example #29
 def test_lrn2(self):
     n = caffe.NetSpec()
     n.input1 = L.Input(shape=make_shape([6, 4, 64, 64]))
     n.bnll1 = L.LRN(n.input1,
                     local_size=7,
                     alpha=1.1,
                     beta=0.8,
                     norm_region=P.LRN.WITHIN_CHANNEL)
     self._test_model(*self._netspec_to_model(n, 'lrn2'))
Code example #30
def conv_bn(bottom, nout, ks=3, stride=1, pad=0, learn=True):
    if learn:
        param = [dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)]
    else:
        param = [dict(lr_mult=0, decay_mult=0), dict(lr_mult=0, decay_mult=0)]

    conv = L.Convolution(bottom, kernel_size=ks, stride=stride,
                         num_output=nout, pad=pad, param=param,
                         weight_filler=dict(type="msra"),
                         bias_filler=dict(type="constant"))
    bn = L.BatchNorm(conv)
    lrn = L.LRN(bn)
    return conv, bn, lrn
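
A hypothetical usage of the helper above inside a NetSpec (input shape and names are illustrative, assuming the usual caffe and L imports):

n = caffe.NetSpec()
n.data = L.Input(shape=dict(dim=[1, 3, 224, 224]))
n.conv1, n.bn1, n.lrn1 = conv_bn(n.data, 64, ks=3, stride=1, pad=1)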