Example 1
import caffe
from caffe import layers as L


# Note: `weight`, `smooth` and the hyperparameters (depth, channel, group,
# kernel, dilate, share) are assumed to be defined elsewhere in the module.
def test_IFNNSR():
    net = caffe.NetSpec()

    net.data = L.Input(shape=dict(dim=[1, 1, 2 * depth + 1, 2 * depth + 1]),
                       ntop=1)

    net.model = net.data
    net.model = weight(net.model, share)  # element-wise product
    net.model = smooth(net.model, channel, group, kernel,
                       dilate)  # convolution
    net.model = L.TanH(net.model)  # tanh activation function
    net.model = smooth(net.model, channel, group, kernel,
                       dilate)  # convolution
    net.model = L.TanH(net.model)  # tanh activation function
    net.model = smooth(net.model, share, group, kernel, dilate)  # convolution
    net.model = L.TanH(net.model)  # tanh activation function
    net.sum = net.model

    for j in range(depth - 1):
        net.model = weight(net.model, share)
        net.model = smooth(net.model, channel, group, kernel, dilate)
        net.model = L.TanH(net.model)
        net.model = smooth(net.model, channel, group, kernel, dilate)
        net.model = L.TanH(net.model)
        net.model = smooth(net.model, share, group, kernel, dilate)
        net.model = L.TanH(net.model)
        net.sum = L.Eltwise(net.sum, net.model)

    net.predict = weight(net.sum, share)
    # net.loss = L.WeightL2Loss(net.predict, net.label)
    # net.loss = L.EuclideanLoss(net.predict, net.label)

    return net.to_proto()
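
For context, a minimal usage sketch (not part of the original example): the returned NetParameter can be serialized to a prototxt file and loaded with pycaffe. The file name below is a placeholder.

import caffe

# Write the generated deploy-style definition to disk and load it in TEST
# phase; trained weights would normally be supplied via a .caffemodel file.
with open('test_IFNNSR.prototxt', 'w') as f:  # placeholder file name
    f.write(str(test_IFNNSR()))

net = caffe.Net('test_IFNNSR.prototxt', caffe.TEST)
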
Example 2
import caffe
from caffe import layers as L


# Note: `weight`, `smooth`, the data paths and the batch-size/architecture
# hyperparameters are assumed to be defined elsewhere in the module.
def train_IFNNSR():
    net = caffe.NetSpec()
    net.data, net.label = L.HDF5Data(hdf5_data_param={
        'source': train_data_path,
        'batch_size': batch_size_train
    },
                                     include={'phase': caffe.TRAIN},
                                     ntop=2)
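    # Capture the TRAIN-phase data layer now: the next assignment to
    # net.data / net.label overwrites it with the TEST-phase HDF5 layer,
    # and the two prototxt fragments are concatenated on return.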
    train_data_layer = str(net.to_proto())
    net.data, net.label = L.HDF5Data(hdf5_data_param={
        'source': test_data_path,
        'batch_size': batch_size_test
    },
                                     include={'phase': caffe.TEST},
                                     ntop=2)

    net.model = net.data
    net.model = weight(net.model, share)  # element-wise product
    net.model = smooth(net.model, channel, group, kernel,
                       dilate)  # convolution
    net.model = L.TanH(net.model)  # tanh activation function
    net.model = smooth(net.model, channel, group, kernel,
                       dilate)  # convolution
    net.model = L.TanH(net.model)  # tanh activation function
    net.model = smooth(net.model, share, group, kernel, dilate)  # convolution
    net.model = L.TanH(net.model)  # tanh activation function
    net.sum = net.model

    for j in range(depth - 1):
        net.model = weight(net.model, share)
        net.model = smooth(net.model, channel, group, kernel, dilate)
        net.model = L.TanH(net.model)
        net.model = smooth(net.model, channel, group, kernel, dilate)
        net.model = L.TanH(net.model)
        net.model = smooth(net.model, share, group, kernel, dilate)
        net.model = L.TanH(net.model)
        net.sum = L.Eltwise(net.sum, net.model)

    net.predict = weight(net.sum, share)
    net.loss = L.WeightL2Loss(net.predict, net.label)
    # net.loss = L.EuclideanLoss(net.predict, net.label)

    return train_data_layer + str(net.to_proto())
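
Similarly, a usage sketch under stated assumptions: write the concatenated train/test definition to a file and drive it with a solver. `solver.prototxt` is an assumed, separately written solver definition whose `net` field points at the generated file; both file names are placeholders.

import caffe

with open('train_IFNNSR.prototxt', 'w') as f:  # placeholder file name
    f.write(train_IFNNSR())

caffe.set_mode_gpu()  # or caffe.set_mode_cpu()
solver = caffe.SGDSolver('solver.prototxt')  # assumed solver definition
solver.solve()  # run training as configured in the solver
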