Example #1
0
def net_deploy(deploy_prototxt, model):
    """Generate the OFNet evaluation prototxt and load the trained net.

    Writes the inference graph to 'ofnet_eval.prototxt', then returns a
    caffe.Net built from ``deploy_prototxt`` with weights from ``model``.
    """
    from ofnet import ofnet

    spec = caffe.NetSpec()
    spec.data = L.Input(shape=[dict(dim=[1, 3, 224, 224])])

    # Attach the OFNet body in inference mode, then squash the edge logits.
    ofnet(spec, is_train=False)
    spec.sigmoid_edge = L.Sigmoid(spec.unet1b_edge)

    # Dump the generated network definition for inspection/reuse.
    with open('ofnet_eval.prototxt', 'w') as f:
        f.write(str(spec.to_proto()))

    return caffe.Net(deploy_prototxt, model, caffe.TEST)
Example #2
0
 def net():
     """Input -> 7x7/2 convolution (64 outputs, xavier init) -> in-place PReLU."""
     spec = caffe.NetSpec()
     spec.data = L.Input(input_param=dict(shape=dict(dim=data_shape)))
     # Per-blob multipliers: weights at base lr, biases at 2x lr / no decay.
     lr_params = [
         dict(lr_mult=1, decay_mult=1),
         dict(lr_mult=2, decay_mult=0),
     ]
     spec.dataout = L.Convolution(spec.data,
                                  param=lr_params,
                                  kernel_size=7,
                                  stride=2,
                                  num_output=64,
                                  weight_filler=dict(type="xavier", std=0.03))
     spec.prelu = L.PReLU(spec.dataout, in_place=True)
     return spec.to_proto()
 def test_convolution4(self):
     """Round-trip a 5-D grouped convolution with explicit engine selection."""
     engine = 1  # NOTE(review): presumably Caffe's own engine enum -- confirm value
     spec = caffe.NetSpec()
     spec.input1 = L.Input(shape=make_shape([10, 4, 6, 6, 6]))
     spec.conv1 = L.Convolution(spec.input1,
                                num_output=10,
                                kernel_size=5,
                                pad=10,
                                stride=2,
                                group=2,
                                weight_filler=make_weight_filler(),
                                bias_filler=make_bias_filler(),
                                engine=engine)
     self._test_model(*self._netspec_to_model(spec, 'convolution4'))
 def test_deconvolution_multilinear_upsample(self):
     """Bilinear-initialized grouped deconvolution doing 2x upsampling, fixed weights."""
     factor = 2
     spec = caffe.NetSpec()
     spec.input1 = L.Input(shape=make_shape([10, 3, 64, 64]))
     # Kernel/stride/pad all derived from the integer upsampling factor.
     deconv_params = dict(kernel_size=2 * factor - factor % 2,
                          stride=factor,
                          num_output=3,
                          pad=int(math.ceil((factor - 1) / 2.0)),
                          weight_filler=dict(type="bilinear"),
                          bias_term=False,
                          group=3)
     spec.deconv1 = L.Deconvolution(spec.input1,
                                    convolution_param=deconv_params)
     self._test_model(*self._netspec_to_model(
         spec, 'deconvolution_multilinear_upsample', random_weights=False))
Example #5
0
def overall_net(batch_size, channels, height, width, action_size, net_type):
    """Build the Q-value network proto.

    For ``net_type == 'test'`` only the forward graph is returned; otherwise
    the graph also contains the filtered-Q / target Euclidean loss inputs.
    Feature-extractor params are learnable only when ``net_type == 'action'``.
    """
    n = caffe.NetSpec()
    n.frames = L.Input(shape=dict(dim=[batch_size, channels, height, width]))

    # Freeze the image features unless we are training the action net.
    param = learned_param if net_type == 'action' else frozen_param

    # Conv trunk (kernel, channels): 8x8/4 -> 4x4/2 -> 3x3/1, then a 512-d FC.
    n.conv1, n.relu1 = conv_relu(n.frames, 8, 32, stride=4, param=param)
    n.conv2, n.relu2 = conv_relu(n.relu1, 4, 64, stride=2, param=param)
    n.conv3, n.relu3 = conv_relu(n.relu2, 3, 64, stride=1, param=param)
    n.fc4, n.relu4 = fc_relu(n.relu3, 512, param=param)

    # One Q-value per action.
    n.value_q = L.InnerProduct(n.relu4,
                               num_output=action_size,
                               param=param,
                               weight_filler=dict(type='gaussian', std=0.005),
                               bias_filler=dict(type='constant', value=1))

    if net_type == 'test':
        return n.to_proto()

    # Mask Q-values through an element-wise product (Eltwise operation 0: PROD)
    # and regress the surviving entries onto the supplied targets.
    n.filter = L.Input(shape=dict(dim=[batch_size, action_size]))
    n.filtered_value_q = L.Eltwise(n.value_q, n.filter, operation=0)

    n.target = L.Input(shape=dict(dim=[batch_size, action_size]))
    n.loss = L.EuclideanLoss(n.filtered_value_q, n.target)

    return n.to_proto()
Example #6
0
def _miso_op(data_list, func, *args, **kwargs):
    """Create multi input and single output Caffe op.

    Each entry of ``data_list`` becomes an Input layer named ``data<idx>``
    whose shape is read from the entry's ``.shape``; ``func`` is applied to
    all inputs and its result exposed as ``n.output``.
    """
    if not isinstance(data_list, (tuple, list)):
        raise TypeError("Need tuple or list but get {}".format(
            type(data_list)))
    n = caffe.NetSpec()
    inputs = []
    for idx, data in enumerate(data_list):
        key = "data" + str(idx)
        n[key] = L.Input(input_param={"shape": {"dim": list(data.shape)}})
        inputs.append(n[key])
    n.output = func(*inputs, *args, **kwargs)
    return n
def residual_factory_padding1(bottom, num_filter, stride, batch_size,
                              feature_size):
    """Residual block whose shortcut is average-pooled then channel-padded.

    The main path is two 3x3 convs; the shortcut is 2x2 average pooling
    concatenated with ``num_filter // 2`` extra channels (a zero Input blob)
    so both paths match before the element-wise SUM.
    """
    conv1 = conv_factory_relu(bottom,
                              ks=3,
                              nout=num_filter,
                              stride=stride,
                              pad=1)
    conv2 = conv_factory(conv1, ks=3, nout=num_filter, stride=1, pad=1)
    pool1 = L.Pooling(bottom, pool=P.Pooling.AVE, kernel_size=2, stride=2)
    # Fix: use floor division -- blob dimensions must be integers, and
    # ``num_filter / 2`` yields a float on Python 3, producing a bad shape.
    padding = L.Input(input_param=dict(shape=dict(
        dim=[batch_size, num_filter // 2, feature_size, feature_size])))
    concate = L.Concat(pool1, padding, axis=1)
    addition = L.Eltwise(concate, conv2, operation=P.Eltwise.SUM)
    relu = L.ReLU(addition, in_place=True)
    return relu
 def test_deconvolution3(self):
     """Round-trip a 5-D grouped deconvolution with explicit engine selection."""
     engine = 1  # NOTE(review): presumably Caffe's own engine enum -- confirm value
     spec = caffe.NetSpec()
     spec.input1 = L.Input(shape=make_shape([10, 4, 6, 6, 6]))
     deconv_params = dict(num_output=10,
                          kernel_size=5,
                          pad=1,
                          stride=2,
                          group=2,
                          weight_filler=make_weight_filler(),
                          bias_filler=make_bias_filler(),
                          engine=engine)
     spec.deconv1 = L.Deconvolution(spec.input1,
                                    convolution_param=deconv_params)
     self._test_model(*self._netspec_to_model(spec, 'deconvolution3'))
Example #9
0
def _make_module(model_path, in_shape, axis):
    """Write a one-layer Softmax test model (prototxt + caffemodel) under ``model_path``."""
    spec = caffe.NetSpec()
    spec.data = L.Input(name="data", input_param={"shape": {"dim": in_shape}})
    spec.softmax = L.Softmax(spec.data, name="softmax", axis=axis)

    prototxt = os.path.join(model_path, 'test.prototxt')
    with open(prototxt, 'w') as f:
        f.write(str(spec.to_proto()))

    net = caffe.Net(prototxt, caffe.TEST)
    # Randomize any all-zero parameter blobs so the saved model is non-trivial.
    for layer in net.layers:
        for blob in layer.blobs:
            if np.count_nonzero(blob.data) == 0:
                blob.data[...] = np.random.randn(*blob.data.shape)

    net.save(os.path.join(model_path, 'test.caffemodel'))
Example #10
0
def test_deconvolution():
    # type: ()->caffe.NetSpec
    """Grouped 5x5 deconvolution with large symmetric padding over a 10x4x64x64 input."""
    spec = caffe.NetSpec()
    spec.input1 = L.Input(shape=make_shape([10, 4, 64, 64]))
    deconv_params = dict(num_output=10,
                         kernel_size=5,
                         pad=10,
                         stride=2,
                         group=2,
                         weight_filler=make_weight_filler(),
                         bias_filler=make_bias_filler())
    spec.deconv1 = L.Deconvolution(spec.input1,
                                   convolution_param=deconv_params)
    return spec
Example #11
0
def test_deconvolution2():
    # type: ()->caffe.NetSpec
    """Bias-free deconvolution with asymmetric padding (pad_h=10, pad_w=5)."""
    spec = caffe.NetSpec()
    spec.input1 = L.Input(shape=make_shape([10, 4, 64, 64]))
    deconv_params = dict(num_output=10,
                         kernel_size=5,
                         bias_term=False,
                         pad_h=10,
                         pad_w=5,
                         weight_filler=make_weight_filler(),
                         bias_filler=make_bias_filler())
    spec.deconv1 = L.Deconvolution(spec.input1,
                                   convolution_param=deconv_params)
    return spec
Example #12
0
 def net():
     """Input followed by one fully-connected layer (xavier weights, constant bias).

     Output width and filler settings come from the module-level
     ``_num_output`` / ``_std`` / ``_vaule`` configuration names.
     """
     spec = caffe.NetSpec()
     spec.data = L.Input(input_param=dict(shape=dict(dim=data_shape)))
     lr_params = [
         dict(lr_mult=1, decay_mult=1),
         dict(lr_mult=1, decay_mult=1),
     ]
     spec.dataout = L.InnerProduct(spec.data,
                                   param=lr_params,
                                   inner_product_param=dict(
                                       num_output=_num_output,
                                       weight_filler=dict(type='xavier',
                                                          std=_std),
                                       bias_filler=dict(type='constant',
                                                        value=_vaule)))
     return spec.to_proto()
def createNet(shape1, shape2):
    """Build a two-input net with the given blob shapes and return the loaded caffe.Net."""
    blob_shapes = []
    for dims in (shape1, shape2):
        bs = caffe_pb2.BlobShape()
        bs.dim.extend(dims)
        blob_shapes.append(bs)

    n = caffe.NetSpec()  # create the net
    n.data1, n.data2 = L.Input(shape=blob_shapes, ntop=2)

    prototxt = 'temp.prototxt'
    with open(prototxt, 'w') as f:
        f.write(str(n.to_proto()))
    net = caffe.Net(prototxt, caffe.TEST)
    # The temporary prototxt is deliberately left on disk.
    #os.remove(prototxt)
    return net
Example #14
0
def _make_module(model_path, n, i_channels, i_size):
    """Write a one-layer ReLU test model (prototxt + caffemodel) under ``model_path``."""
    spec = caffe.NetSpec()
    input_shape = [n, i_channels, i_size[0], i_size[1]]
    spec.data = L.Input(name="data",
                        input_param={"shape": {"dim": input_shape}})
    spec.relu = L.ReLU(spec.data, name="relu")

    prototxt = os.path.join(model_path, 'test.prototxt')
    with open(prototxt, 'w') as f:
        f.write(str(spec.to_proto()))

    net = caffe.Net(prototxt, caffe.TEST)

    net.save(os.path.join(model_path, 'test.caffemodel'))
Example #15
0
def head(in_place_bn=False, index_width=1, scale_ext=False):
    """Network stem: data -> 3x3/2 conv (32 ch, no bias) -> BatchNorm -> in-place ReLU.

    ``index_width`` controls zero-padding of the numeric index in generated
    layer names; ``in_place_bn``/``scale_ext`` are forwarded to BatchNorm.
    """
    ns = caffe.NetSpec()
    input_params = dict(shape=dict(dim=[1, 3, 224, 224]))
    ns.data = L.Input(**input_params)
    conv1_params = dict(kernel_size=3,
                        num_output=32,
                        stride=2,
                        pad=1,
                        weight_filler=default_weight_filler,
                        bias_term=False)
    ns[f'conv{1:0{index_width}d}'] = L.Convolution(ns.data, **conv1_params)
    BatchNorm(ns, in_place_bn, scale_ext)
    # Fix: wrap keys() in list() -- on Python 3 (this file already uses
    # f-strings) keys() may be a view, which is not indexable.
    ns[f'relu{1:0{index_width}d}/relu'] = L.ReLU(ns[list(ns.keys())[-1]],
                                                 in_place=True)
    return ns
Example #16
0
 def net():
     """Input -> 7x7/2 conv (64 outputs, pad 3) -> in-place Dropout at module-level ``ratio``."""
     spec = caffe.NetSpec()
     spec.data = L.Input(input_param=dict(shape=dict(dim=data_shape)))
     # Weights at base lr with decay; biases at 2x lr, no decay.
     lr_params = [
         dict(lr_mult=1, decay_mult=1),
         dict(lr_mult=2, decay_mult=0),
     ]
     spec.dataout = L.Convolution(spec.data,
                                  param=lr_params,
                                  kernel_size=7,
                                  stride=2,
                                  num_output=64,
                                  pad=3,
                                  weight_filler=dict(type="xavier", std=0.03),
                                  bias_filler=dict(type='constant', value=0.2))
     spec.drop = L.Dropout(spec.dataout, dropout_ratio=ratio, in_place=True)
     return spec.to_proto()
Example #17
0
 def net():
     """Single Convolution whose hyper-parameters come from module-level ``_*`` settings."""
     spec = caffe.NetSpec()
     spec.data = L.Input(input_param=dict(shape=dict(dim=data_shape)))
     lr_params = [
         dict(lr_mult=1, decay_mult=1),
         dict(lr_mult=2, decay_mult=0),
     ]
     spec.dataout = L.Convolution(spec.data,
                                  param=lr_params,
                                  kernel_size=_kernel_size,
                                  stride=_stride,
                                  num_output=_num_output,
                                  pad=_pad,
                                  group=_group,
                                  weight_filler=dict(type="xavier", std=0.03),
                                  bias_filler=dict(type='constant', value=0.2))
     return spec.to_proto()
def createAutoencoder(hdf5, input_size, batch_size, phase):
    """Single-hidden-layer autoencoder proto: input -> 256-unit sigmoid -> reconstruction.

    In the "inference" phase data is a bare Input of width ``input_size``;
    otherwise it streams from the given HDF5 source. Loss is the Euclidean
    reconstruction error against the input itself.
    """
    n = caffe.NetSpec()
    if phase == "inference":
        n.data = L.Input(input_param={'shape': {'dim': [1, input_size]}})
    else:
        n.data = L.HDF5Data(batch_size=batch_size, source=hdf5, ntop=1)

    n.ip1 = L.InnerProduct(n.data, num_output=256,
                           weight_filler=dict(type='xavier'))
    n.bottleneck = L.Sigmoid(n.ip1, in_place=True)
    n.decode = L.InnerProduct(n.bottleneck, num_output=input_size,
                              weight_filler=dict(type='xavier'))
    n.loss = L.EuclideanLoss(n.decode, n.data)
    return n.to_proto()
Example #19
0
def netinputv3():
    """Small classifier over 1x32x32 inputs producing 34-way softmax probabilities."""
    spec = caffe.NetSpec()
    spec.data = L.Input(input_param=dict(shape=[dict(dim=[1, 1, 32, 32])]))
    # Conv trunk: widths grow 60 -> 90 -> 120 -> 150, with 2x2/2 pooling
    # after the second and third convolutions.
    spec.conv1 = conv(spec.data, 3, 60)
    spec.conv2 = conv(spec.conv1, 3, 90)
    spec.pool2 = pooling(spec.conv2, 2, 2)
    spec.conv3 = conv(spec.pool2, 3, 120)
    spec.pool3 = pooling(spec.conv3, 2, 2)
    spec.conv4 = conv(spec.pool3, 3, 150)
    # Classifier head: 1000 -> 500 -> 34 fully-connected with ReLUs between.
    spec.ip1 = ip(spec.conv4, 1000)
    spec.relu1 = relu(spec.ip1)
    spec.ip2 = ip(spec.relu1, 500)
    spec.relu2 = relu(spec.ip2)
    spec.ip3 = ip(spec.relu2, 34)
    spec.prob = L.Softmax(spec.ip3)
    return str(spec.to_proto())
Example #20
0
def test_deconvolution_multilinear_upsample():
    # type: ()->caffe.NetSpec
    """Bilinear-initialized grouped deconvolution performing 2x upsampling."""
    factor = 2
    spec = caffe.NetSpec()
    spec.input1 = L.Input(shape=make_shape([10, 3, 64, 64]))
    # Kernel/stride/pad all derived from the integer upsampling factor.
    deconv_params = dict(kernel_size=2 * factor - factor % 2,
                         stride=factor,
                         num_output=3,
                         pad=int(math.ceil((factor - 1) / 2.0)),
                         weight_filler=dict(type="bilinear"),
                         bias_term=False,
                         group=3)
    spec.deconv1 = L.Deconvolution(spec.input1,
                                   convolution_param=deconv_params)
    return spec
Example #21
0
def main():
    """Write deploy/train/test prototxts for a 9-class MobileNet fine-tune."""
    # Pretrained ImageNet weights must already be present on disk.
    weights = './mobilenet.caffemodel'
    assert os.path.exists(weights)

    mean_bgr = [103.94, 116.78, 123.68]

    # deploy: a single 1x3x224x224 Input blob.
    deploy_data = L.Input(input_param=dict(shape=dict(dim=[1, 3, 224, 224])))
    with open('./deploy.prototxt', 'w') as f:
        f.write(str(mobilenet(data=deploy_data, num_classes=9)))

    # train: mirrored 224 crops from images resized to 256x256.
    train_data, train_label = L.ImageData(
        transform_param=dict(scale=0.017,
                             mirror=True,
                             crop_size=224,
                             mean_value=mean_bgr),
        source='./data/train_shuffled.txt',
        root_folder="./data/",
        batch_size=16,
        new_height=256,
        new_width=256,
        ntop=2)
    with open('./train.prototxt', 'w') as f:
        f.write(
            str(mobilenet(data=train_data, label=train_label, num_classes=9)))

    # test: same preprocessing without mirroring, small batch.
    test_data, test_label = L.ImageData(
        transform_param=dict(scale=0.017,
                             mirror=False,
                             crop_size=224,
                             mean_value=mean_bgr),
        source='./data/test_shuffled.txt',
        root_folder="./data/",
        batch_size=2,
        new_height=256,
        new_width=256,
        ntop=2)
    with open('./test.prototxt', 'w') as f:
        f.write(str(mobilenet(data=test_data, label=test_label,
                              num_classes=9)))
Example #22
0
def _make_module(model_path, n, i_channels, i_size, k_size, o_channels, strides, padding, dilations):
    """Write a one-convolution test model (prototxt + random caffemodel) under ``model_path``."""
    spec = caffe.NetSpec()
    input_shape = [n, i_channels, i_size[0], i_size[1]]
    spec.data = L.Input(name="data",
                        input_param={"shape": {"dim": input_shape}})
    spec.conv = L.Convolution(spec.data,
                              name="conv2d",
                              kernel_size=k_size,
                              num_output=o_channels,
                              stride=strides,
                              pad=padding,
                              dilation=dilations,
                              weight_filler=dict(type='xavier'))

    prototxt = os.path.join(model_path, 'test.prototxt')
    with open(prototxt, 'w') as f:
        f.write(str(spec.to_proto()))

    net = caffe.Net(prototxt, caffe.TEST)
    # Randomize any all-zero parameter blobs so the saved weights are non-trivial.
    for layer in net.layers:
        for blob in layer.blobs:
            if np.count_nonzero(blob.data) == 0:
                blob.data[...] = np.random.randn(*blob.data.shape)

    net.save(os.path.join(model_path, 'test.caffemodel'))
def generate_data(method):
    """Install the data layer(s) for the requested phase into the module-level ``net``.

    For "train_test": adds the TRAIN-phase ImageData layer, snapshots the
    current net as a prototxt string, then overwrites ``net.data``/``net.label``
    with the TEST-phase layer and returns only the train snapshot (the test
    layer remains in ``net`` for the caller's later serialization --
    presumably; verify against the caller). For "deploy": installs a plain
    Input layer and returns "".
    """
    if method == "train_test":
        # Training input: mirrored 32x32 crops, mean-subtracted, shuffled.
        net.data, net.label = Layers.ImageData(
            name="cifar",
            ntop=2,
            transform_param=dict(
                mirror=True,
                crop_size=32,
                mean_value=[104, 117, 123],
            ),
            image_data_param=dict(
                source="/home/cengsl14/dataset/cifar10_train_list.txt",
                batch_size=128,
                shuffle=True,
                is_color=True,
                root_folder="/home/cengsl14/dataset/cifar10_train/",
            ),
            include=dict(phase=caffe.TRAIN, ),
        )
        # Snapshot the train part before it is overwritten below.
        train_part = str(net.to_proto())
        ## test part: no mirroring/cropping, smaller batch, fixed order.
        net.data, net.label = Layers.ImageData(
            name="cifar",
            ntop=2,
            transform_param=dict(
                mirror=False,
                mean_value=[104, 117, 123],
            ),
            image_data_param=dict(
                source="/home/cengsl14/dataset/cifar10_test_list.txt",
                batch_size=32,
                shuffle=False,
                is_color=True,
                mirror=False,
                root_folder="/home/cengsl14/dataset/cifar10_test/",
            ),
            include=dict(phase=caffe.TEST),
        )
        return train_part
    elif method == "deploy":
        # Deploy input: a single 1x3x32x32 blob, no labels.
        net.tops["data"] = Layers.Input(
            name="cifar", input_param=dict(shape=dict(dim=[1, 3, 32, 32])))
        return ""
    else:
        raise NotImplementedError
Example #24
0
def KitModel(weight_file=None):
    """Rebuild the converted HardNet graph: 1x1x32x32 input -> conv/tanh/pool
    -> conv/tanh -> 128-d linear -> tanh.

    ``weight_file`` is accepted for converter-interface compatibility but is
    not read here. Layer names are preserved exactly as generated by the
    model converter.
    """
    n = caffe.NetSpec()

    n["input"] = L.Input(shape=[dict(dim=[1, 1, 32, 32])], ntop=1)
    top = n["input"]

    # features: 7x7 convolution, 32 outputs, no padding.
    n["HardNetnSequentialnfeaturesnnConv2dn0n7"] = L.Convolution(
        top, kernel_h=7, kernel_w=7, stride=1, num_output=32,
        pad_h=0, pad_w=0, group=1, bias_term=True, ntop=1)
    top = n["HardNetnSequentialnfeaturesnnConv2dn0n7"]

    n["HardNetnSequentialnfeaturesnnTanhn1n8"] = L.TanH(top, ntop=1)
    top = n["HardNetnSequentialnfeaturesnnTanhn1n8"]

    # 2x2/2 pooling (pool=0).
    n["HardNetnSequentialnfeaturesnnMaxPool2dn2n9"] = L.Pooling(
        top, pool=0, kernel_size=2, pad_h=0, pad_w=0, stride=2, ntop=1)
    top = n["HardNetnSequentialnfeaturesnnMaxPool2dn2n9"]

    # features: 6x6 convolution, 64 outputs, no padding.
    n["HardNetnSequentialnfeaturesnnConv2dn3n10"] = L.Convolution(
        top, kernel_h=6, kernel_w=6, stride=1, num_output=64,
        pad_h=0, pad_w=0, group=1, bias_term=True, ntop=1)
    top = n["HardNetnSequentialnfeaturesnnConv2dn3n10"]

    n["HardNetnSequentialnfeaturesnnTanhn4n11"] = L.TanH(top, ntop=1)
    top = n["HardNetnSequentialnfeaturesnnTanhn4n11"]

    # classifier: fully connected down to 128 dimensions, then tanh.
    n["HardNetnSequentialnclassifiernnLinearn0n13"] = L.InnerProduct(
        top, num_output=128, bias_term=True, ntop=1)
    top = n["HardNetnSequentialnclassifiernnLinearn0n13"]

    n["HardNetnSequentialnclassifiernnTanhn1n14"] = L.TanH(top, ntop=1)

    return n
Example #25
0
def create_proto(split):
    """Build a LeNet-style MNIST proto for 'train', 'test' or 'deploy'.

    Train/test read LMDB data and attach accuracy + softmax loss; deploy takes
    a bare Input blob and ends in a plain softmax. Raises NotImplementedError
    for any other ``split``.
    """
    n = caffe.NetSpec()

    # data layer
    if split == 'train' or split == 'test':
        if split == 'train':
            source = "mnist_train_lmdb"
            phase = caffe.TRAIN
            bs = 64
        else:
            source = "mnist_test_lmdb"
            phase = caffe.TEST
            bs = 100
        # NOTE(review): ``phase`` is assigned but never used below -- it is
        # not attached to the Data layer; confirm whether that was intended.

        data_param = {
                "source" : source,
                "backend" :  P.Data.LMDB,
                "batch_size" : bs,
        }
        # Scale raw [0, 255] pixels into [0, 1).
        transform_param = {
                "scale": 1/256.0
        }
        n.data, n.label = L.Data(
            ntop=2,  data_param=data_param, transform_param = transform_param)
    elif split == 'deploy':
        n.data = L.Input(
            input_param={"shape": {"dim": [64, 1, 28, 28]}})
    else:
        raise NotImplementedError

    # LeNet trunk: two conv/pool stages then two FC layers.
    n.conv1 = convolution(n.data, nout=20, ks=5, stride=1, pad=0)
    n.pool1 = max_pool(n.conv1, ks=2, stride=2)
    n.conv2 = convolution(n.pool1, nout=50, ks=5, stride=1, pad=0)
    n.pool2 = max_pool(n.conv2, ks=2, stride=2)
    n.ip1   = inner_product(n.pool2, nout=500)
    n.relu1 = relu(n.ip1)
    # NOTE(review): ip2 consumes n.ip1, not n.relu1 -- if relu() is in-place
    # these are the same blob, otherwise relu1 is dead; confirm the helper.
    n.ip2   = inner_product(n.ip1, nout=10)

    if split == 'train' or split == 'test':
        n.accuracy = accuracy(n.ip2, n.label)
        n.softmax = softmax_loss(n.ip2, n.label)
    elif split == 'deploy':
        n.softmax = softmax(n.ip2)

    return n.to_proto()
def fcn(split, num_classes=None):
    """FCN front end: VGG-style conv1-conv4 stages, 1x1 scoring, 16x deconv + crop.

    ``split`` is accepted for interface compatibility; the graph built here
    does not depend on it. Returns the NetProto.
    """
    net = caffe.NetSpec()
    net.data = L.Input(shape=[dict(dim=[1, 3, 500, 500])])

    # Stage 1: two 64-channel convs (first uses FCN's customary pad=100).
    net.conv1_1, net.relu1_1 = conv_relu(net.data, 64, pad=100)
    net.conv1_2, net.relu1_2 = conv_relu(net.relu1_1, 64)
    net.pool1 = max_pool(net.relu1_2)

    # Stage 2: two 128-channel convs.
    net.conv2_1, net.relu2_1 = conv_relu(net.pool1, 128)
    net.conv2_2, net.relu2_2 = conv_relu(net.relu2_1, 128)
    net.pool2 = max_pool(net.relu2_2)

    # Stage 3: three 256-channel convs.
    net.conv3_1, net.relu3_1 = conv_relu(net.pool2, 256)
    net.conv3_2, net.relu3_2 = conv_relu(net.relu3_1, 256)
    net.conv3_3, net.relu3_3 = conv_relu(net.relu3_2, 256)
    net.pool3 = max_pool(net.relu3_3)

    # Stage 4: three 512-channel convs.
    net.conv4_1, net.relu4_1 = conv_relu(net.pool3, 512)
    net.conv4_2, net.relu4_2 = conv_relu(net.relu4_1, 512)
    net.conv4_3, net.relu4_3 = conv_relu(net.relu4_2, 512)
    net.pool4 = max_pool(net.relu4_3)

    # 1x1 scoring into num_classes channels.
    net.score_fr = L.Convolution(
        net.pool4,
        num_output=num_classes,
        kernel_size=1,
        pad=0,
        param=[dict(lr_mult=1, decay_mult=1),
               dict(lr_mult=2, decay_mult=0)])

    # Non-learned (lr_mult=0) 16x deconvolution, then crop back to the input.
    net.upscore = L.Deconvolution(net.score_fr,
                                  convolution_param=dict(num_output=num_classes,
                                                         kernel_size=32,
                                                         stride=16,
                                                         bias_term=False),
                                  param=[dict(lr_mult=0)])
    net.score = crop(net.upscore, net.data)

    return net.to_proto()
Example #27
0
def test_net():
    """Smoke-test AlexNet: generate a deploy prototxt, load pretrained weights,
    classify a sample image and display it."""
    import caffe
    import skimage.data as dt
    import skimage.transform as skt
    import numpy as np
    from matplotlib import pyplot as plt

    def pre_processing(img):
        # Normalize to 3-channel BGR, 227x227, mean-subtracted CHW layout.
        if img.ndim == 2:  # Gray image
            img = np.concatenate([np.expand_dims(img, 2)] * 3, 2)
        elif img.shape[2] > 3:  # RGB+depth or RGB+alpha. Ignore 4th channel
            img = img[:, :, :3]
        img = skt.resize(img, (256, 256)) * 255
        img = img[:, :, (2, 1, 0)]  # RGB -> BGR
        img = np.transpose(img, (2, 0, 1))  # HWC -> CHW
        img = img[:, 16:243, 16:243]  # 227x227 crop
        img -= np.array([104, 117, 123]).reshape((3, 1, 1))
        return img

    # Load and process image
    img = dt.chelsea()
    img_prep = pre_processing(img)

    # Specify model
    ns = caffe.NetSpec()
    alexnet = AlexNet(ns)
    ns.data = L.Input(shape=dict(dim=[1, 3, 227, 227]))
    alexnet.inference_proto(ns.data, mult=1.)
    deploy_fn = '/tmp/deploy.prototxt'
    with open(deploy_fn, 'w') as f:
        f.write(str(ns.to_proto()))

    # Load pretrained model
    net = caffe.Net(deploy_fn, caffe.TEST)
    alexnet.load_pretrained(net)

    # Forward image and infer class
    net.blobs['data'].data[0] = img_prep
    net.forward()

    label = np.argmax(net.blobs['fc8'].data, axis=1)
    classes = [l.strip() for l in open('imagenet_synsets.txt')]
    # Fix: Python 2 print statement -> print() function; the rest of this
    # file uses Python-3-only syntax (f-strings), so the old form was a
    # SyntaxError.
    print(classes[label[0]])
    plt.imshow(img)
    plt.show()
Example #28
0
def construc_net():
    """VGG-16-style classifier proto: five conv blocks, two FC blocks, 1000-way softmax."""
    spec = caffe.NetSpec()
    spec.data = L.Input(shape=dict(dim=[10, 3, 224, 224]))

    # Convolutional blocks: (block name, conv count, channels).
    top = spec.data
    for name, repeats, channels in (('1', 2, 64),
                                    ('2', 2, 128),
                                    ('3', 4, 256),
                                    ('4', 4, 512),
                                    ('5', 4, 512)):
        top = _block_crp(name, repeats, spec, top, channels)

    # Two fully-connected blocks.
    top = _block_frd('6', spec, top, 4096)
    top = _block_frd('7', spec, top, 4096)

    spec.fc8 = L.InnerProduct(top, num_output=1000)
    spec.prob = L.Softmax(spec.fc8)

    return spec.to_proto()
Example #29
0
def residual_factory_padding2(bottom, num_filter, stride, batch_size,
                              feature_size):
    """Residual block variant merged by a custom Python 'RandAdd' layer.

    The shortcut is 2x2 average pooling concatenated with ``num_filter // 2``
    extra channels (a zero Input blob) so it matches conv2's channel count
    before the stochastic addition.
    """
    conv1 = conv_factory_relu(bottom,
                              ks=3,
                              nout=num_filter,
                              stride=stride,
                              pad=1)
    conv2 = conv_factory(conv1, ks=3, nout=num_filter, stride=1, pad=1)
    pool1 = L.Pooling(bottom, pool=P.Pooling.AVE, kernel_size=2, stride=2)
    # Fix: use floor division -- blob dimensions must be integers, and
    # ``num_filter / 2`` yields a float on Python 3, producing a bad shape.
    padding = L.Input(input_param=dict(shape=dict(
        dim=[batch_size, num_filter // 2, feature_size, feature_size])))
    concate = L.Concat(pool1, padding, axis=1)
    addition = L.Python(concate,
                        conv2,
                        module='resnet_oc',
                        ntop=1,
                        layer='RandAdd')
    relu = L.ReLU(addition, in_place=True)
    return relu
Example #30
0
def construc_net():
    """ResNet-style classifier proto built from ``_branch`` blocks with a 1000-way softmax head."""
    spec = caffe.NetSpec()
    spec.data = L.Input(input_param=dict(shape=dict(dim=[1, 3, 224, 224])))

    top = _block_first(spec, spec.data)
    spec.pool1 = L.Pooling(top, pool=P.Pooling.MAX, kernel_size=3, stride=2)
    top = spec.pool1

    # Stage 2 (128 channels): '2a' is the special first branch of the net.
    top = _branch('2a', spec, top, 128, has_branch1=True, is_branch_2a=True)
    for tag in ('2b', '2c'):
        top = _branch(tag, spec, top, 128)

    # Stages 3-5: (prefix, branch count, channels); the 'a' branch of each
    # stage carries the projection shortcut (has_branch1=True).
    for prefix, count, channels in (('3', 4, 256),
                                    ('4', 6, 512),
                                    ('5', 3, 1024)):
        top = _branch(prefix + 'a', spec, top, channels, has_branch1=True)
        for sub in 'bcdef'[:count - 1]:
            top = _branch(prefix + sub, spec, top, channels)

    spec.pool5 = L.Pooling(top,
                           pool=P.Pooling.AVE,
                           kernel_size=7,
                           stride=1)

    spec.fc6 = L.InnerProduct(spec.pool5, num_output=1000)
    spec.prob = L.Softmax(spec.fc6)

    return spec.to_proto()