Example #1
    def create_net(self):
        '''
        Create a singa net based on caffe proto files.
            net_proto: caffe prototxt that describes the net
            solver_proto: caffe prototxt that describes the solver
            input_sample_shape: shape of the input data tensor
        Returns:
            a FeedForwardNet object
        '''
        caffe_net = self.read_net_proto()
        caffe_solver = None
        if self.caffe_solver_path is not None:
            caffe_solver = self.read_solver_proto()
        layer_confs = []
        flatten_id = 0

        # If the net proto has the input shape
        if len(caffe_net.input_dim) > 0:
            self.input_sample_shape = caffe_net.input_dim
        if len(caffe_net.layer):
            layer_confs = caffe_net.layer
        elif len(caffe_net.layers):
            layer_confs = caffe_net.layers
        else:
            raise Exception('Invalid proto file!')

        net = ffnet.FeedForwardNet()
        for i in range(len(layer_confs)):
            if layer_confs[i].type == 'Data' or layer_confs[i].type == 5:
                continue
            elif layer_confs[i].type == 'Input':
                self.input_sample_shape = layer_confs[i].input_param.shape[0].dim[1:]
            elif layer_confs[i].type == 'SoftmaxWithLoss' or layer_confs[i].type == 21:
                net.loss = loss.SoftmaxCrossEntropy()
            elif layer_confs[i].type == 'EuclideanLoss' or layer_confs[i].type == 7:
                net.loss = loss.SquareError()
            elif layer_confs[i].type == 'Accuracy' or layer_confs[i].type == 1:
                net.metric = metric.Accuracy()
            else:
                strConf = layer_confs[i].SerializeToString()
                conf = model_pb2.LayerConf()
                conf.ParseFromString(strConf)
                if caffe_solver:
                    layer.engine = self.convert_engine(
                        layer_confs[i], caffe_solver.solver_mode)
                else:
                    # if caffe_solver is None, fall back to solver_mode 0 (CPU)
                    layer.engine = self.convert_engine(layer_confs[i], 0)
                lyr = layer.Layer(conf.name, conf)
                if len(net.layers) == 0:
                    print('input sample shape: ', self.input_sample_shape)
                    lyr.setup(self.input_sample_shape)
                    print(lyr.name, lyr.get_output_sample_shape())
                if layer_confs[i].type == 'InnerProduct' or layer_confs[i].type == 14:
                    net.add(layer.Flatten('flat' + str(flatten_id)))
                    flatten_id += 1
                net.add(lyr)

        return net
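
This method reads like part of SINGA's Caffe converter. A minimal usage sketch, assuming it belongs to singa.converter.CaffeConverter (the constructor signature and file paths below are assumptions for illustration, not shown in the snippet):

from singa import converter

# Hypothetical paths; the prototxt files describe the net and the solver.
cvt = converter.CaffeConverter('net.prototxt', 'solver.prototxt',
                               input_sample_shape=(3, 224, 224))
net = cvt.create_net()  # a ffnet.FeedForwardNet, ready for training
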
Example #2
def create_net(use_cpu=False):
    if use_cpu:
        layer.engine = 'singacpp'
    net = ffnet.FeedForwardNet(loss.SoftmaxCrossEntropy(), metric.Accuracy())
    ConvBnReLU(net, 'conv1_1', 64, (3, 32, 32))
    net.add(layer.Dropout('drop1', 0.3))
    ConvBnReLU(net, 'conv1_2', 64)
    net.add(layer.MaxPooling2D('pool1', 2, 2, border_mode='valid'))
    ConvBnReLU(net, 'conv2_1', 128)
    net.add(layer.Dropout('drop2_1', 0.4))
    ConvBnReLU(net, 'conv2_2', 128)
    net.add(layer.MaxPooling2D('pool2', 2, 2, border_mode='valid'))
    ConvBnReLU(net, 'conv3_1', 256)
    net.add(layer.Dropout('drop3_1', 0.4))
    ConvBnReLU(net, 'conv3_2', 256)
    net.add(layer.Dropout('drop3_2', 0.4))
    ConvBnReLU(net, 'conv3_3', 256)
    net.add(layer.MaxPooling2D('pool3', 2, 2, border_mode='valid'))
    ConvBnReLU(net, 'conv4_1', 512)
    net.add(layer.Dropout('drop4_1', 0.4))
    ConvBnReLU(net, 'conv4_2', 512)
    net.add(layer.Dropout('drop4_2', 0.4))
    ConvBnReLU(net, 'conv4_3', 512)
    net.add(layer.MaxPooling2D('pool4', 2, 2, border_mode='valid'))
    ConvBnReLU(net, 'conv5_1', 512)
    net.add(layer.Dropout('drop5_1', 0.4))
    ConvBnReLU(net, 'conv5_2', 512)
    net.add(layer.Dropout('drop5_2', 0.4))
    ConvBnReLU(net, 'conv5_3', 512)
    net.add(layer.MaxPooling2D('pool5', 2, 2, border_mode='valid'))
    net.add(layer.Flatten('flat'))
    net.add(layer.Dropout('drop_flat', 0.5))
    net.add(layer.Dense('ip1', 512))
    net.add(layer.BatchNormalization('batchnorm_ip1'))
    net.add(layer.Activation('relu_ip1'))
    net.add(layer.Dropout('drop_ip2', 0.5))
    net.add(layer.Dense('ip2', 10))
    print('Start initialization............')
    for (p, name) in zip(net.param_values(), net.param_names()):
        print(name, p.shape)
        if 'mean' in name or 'beta' in name:
            p.set_value(0.0)
        elif 'var' in name:
            p.set_value(1.0)
        elif 'gamma' in name:
            initializer.uniform(p, 0, 1)
        elif len(p.shape) > 1:
            if 'conv' in name:
                initializer.gaussian(p, 0, 3 * 3 * p.shape[0])
            else:
                p.gaussian(0, 0.02)
        else:
            p.set_value(0)
        print(name, p.l1())

    return net
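
The ConvBnReLU helper used above is not shown in the snippet. A minimal sketch of such a helper, assuming the usual 3x3/stride-1/pad-1 configuration (kernel size, stride, and layer-name suffixes are assumptions):

def ConvBnReLU(net, name, nb_filters, sample_shape=None):
    # 3x3 convolution with stride 1 and pad 1 preserves the spatial size,
    # followed by batch normalization and a ReLU activation.
    net.add(layer.Conv2D(name, nb_filters, 3, 1, pad=1,
                         input_sample_shape=sample_shape))
    net.add(layer.BatchNormalization(name + '_bn'))
    net.add(layer.Activation(name + '_relu'))
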
Example #3
def create_net(input_shape, use_cpu=False):
    if use_cpu:
        layer.engine = 'singacpp'
    net = ffnet.FeedForwardNet(loss.SoftmaxCrossEntropy(), metric.Accuracy())

    net.add(
        layer.Conv2D('conv1',
                     nb_kernels=32,
                     kernel=7,
                     stride=3,
                     pad=1,
                     input_sample_shape=input_shape))
    net.add(layer.Activation('relu1'))
    net.add(layer.MaxPooling2D('pool1', 2, 2, border_mode='valid'))

    net.add(layer.Conv2D('conv2', nb_kernels=64, kernel=5, stride=3))
    net.add(layer.Activation('relu2'))
    net.add(layer.MaxPooling2D('pool2', 2, 2, border_mode='valid'))

    net.add(layer.Conv2D('conv3', nb_kernels=128, kernel=3, stride=1, pad=2))
    net.add(layer.Activation('relu3'))
    net.add(layer.MaxPooling2D('pool3', 2, 2, border_mode='valid'))

    net.add(layer.Conv2D('conv4', nb_kernels=256, kernel=3, stride=1))
    net.add(layer.Activation('relu4'))
    net.add(layer.MaxPooling2D('pool4', 2, 2, border_mode='valid'))

    net.add(layer.Flatten('flat'))
    net.add(layer.Dense('ip5', 256))
    net.add(layer.Activation('relu5'))
    net.add(layer.Dense('ip6', 16))
    net.add(layer.Activation('relu6'))
    net.add(layer.Dense('ip7', 2))

    print('Parameter initialization............')
    for (p, name) in zip(net.param_values(), net.param_names()):
        print(name, p.shape)
        if 'mean' in name or 'beta' in name:
            p.set_value(0.0)
        elif 'var' in name:
            p.set_value(1.0)
        elif 'gamma' in name:
            initializer.uniform(p, 0, 1)
        elif len(p.shape) > 1:
            if 'conv' in name:
                initializer.gaussian(p, 0, p.size())
            else:
                p.gaussian(0, 0.02)
        else:
            p.set_value(0)
        print(name, p.l1())

    return net
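
A short sketch of running the returned net on a batch, assuming SINGA's tensor/device API; the input shape (3, 230, 230) is a placeholder chosen so every conv/pool layer above receives a large-enough input:

import numpy as np
from singa import device, tensor

dev = device.get_default_device()            # host CPU device
net = create_net((3, 230, 230), use_cpu=True)
net.to_device(dev)

x = tensor.Tensor((8, 3, 230, 230), dev)     # batch of 8 images
x.copy_from_numpy(np.random.rand(8, 3, 230, 230).astype(np.float32))
out = net.predict(x)                         # forward pass only
print(tensor.to_numpy(out).shape)            # expected: (8, 2)
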
Example #4
def create_net(input_shape, use_cpu=False):
    if use_cpu:
        layer.engine = 'singacpp'
    net = ffnet.FeedForwardNet(loss.SoftmaxCrossEntropy(), metric.Accuracy())

    ConvBnReLUPool(net, 'conv1', 32, input_shape)
    ConvBnReLUPool(net, 'conv2', 64)
    ConvBnReLUPool(net, 'conv3', 128)
    ConvBnReLUPool(net, 'conv4', 128)
    ConvBnReLUPool(net, 'conv5', 256)
    ConvBnReLUPool(net, 'conv6', 256)
    ConvBnReLUPool(net, 'conv7', 512)
    ConvBnReLUPool(net, 'conv8', 512)

    net.add(layer.Flatten('flat'))

    net.add(layer.Dense('ip1', 256))
    net.add(layer.BatchNormalization('bn1'))
    net.add(layer.Activation('relu1'))
    net.add(layer.Dropout('dropout1', 0.2))

    net.add(layer.Dense('ip2', 16))
    net.add(layer.BatchNormalization('bn2'))
    net.add(layer.Activation('relu2'))
    net.add(layer.Dropout('dropout2', 0.2))

    net.add(layer.Dense('ip3', 2))

    print('Parameter initialization............')
    for (p, name) in zip(net.param_values(), net.param_names()):
        print(name, p.shape)
        if 'mean' in name or 'beta' in name:
            p.set_value(0.0)
        elif 'var' in name:
            p.set_value(1.0)
        elif 'gamma' in name:
            initializer.uniform(p, 0, 1)
        elif len(p.shape) > 1:
            if 'conv' in name:
                initializer.gaussian(p, 0, p.size())
            else:
                p.gaussian(0, 0.02)
        else:
            p.set_value(0)
        print(name, p.l1())

    return net
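
As with ConvBnReLU earlier, the ConvBnReLUPool helper is not shown. A plausible sketch (again, the kernel/stride/pad values are assumptions) simply appends a pooling step:

def ConvBnReLUPool(net, name, nb_filters, sample_shape=None):
    net.add(layer.Conv2D(name, nb_filters, 3, 1, pad=1,
                         input_sample_shape=sample_shape))
    net.add(layer.BatchNormalization(name + '_bn'))
    net.add(layer.Activation(name + '_relu'))
    # 2x2 max pooling with stride 2 halves the height and width.
    net.add(layer.MaxPooling2D(name + '_pool', 2, 2, border_mode='valid'))
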
Example #5
def create_net(use_cpu=False):
    if use_cpu:
        layer.engine = 'singacpp'

    net = ffnet.FeedForwardNet(loss.SoftmaxCrossEntropy(), metric.Accuracy())
    net.add(
        layer.Conv2D("conv1", 16, 3, 1, pad=1, input_sample_shape=(3, 32, 32)))
    net.add(layer.BatchNormalization("bn1"))
    net.add(layer.Activation("relu1"))

    Block(net, "2a", 16, 1)
    Block(net, "2b", 16, 1)
    Block(net, "2c", 16, 1)

    Block(net, "3a", 32, 2)
    Block(net, "3b", 32, 1)
    Block(net, "3c", 32, 1)

    Block(net, "4a", 64, 2)
    Block(net, "4b", 64, 1)
    Block(net, "4c", 64, 1)

    net.add(layer.AvgPooling2D("pool4", 8, 8, border_mode='valid'))
    net.add(layer.Flatten('flat'))
    net.add(layer.Dense('ip5', 10))
    print('Start initialization............')
    for (p, name) in zip(net.param_values(), net.param_names()):
        # print name, p.shape
        if 'mean' in name or 'beta' in name:
            p.set_value(0.0)
        elif 'var' in name:
            p.set_value(1.0)
        elif 'gamma' in name:
            initializer.uniform(p, 0, 1)
        elif len(p.shape) > 1:
            if 'conv' in name:
                # initializer.gaussian(p, 0, math.sqrt(2.0/p.shape[1]))
                initializer.gaussian(p, 0, 9.0 * p.shape[0])
            else:
                initializer.uniform(p, p.shape[0], p.shape[1])
        else:
            p.set_value(0)
        # print name, p.l1()

    return net
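
The Block helper builds a residual unit. SINGA's FeedForwardNet supports branching by passing source layers to net.add; a sketch of a basic block in that style, modeled on SINGA's cifar10 resnet example (the layer names and the 1x1 projection shortcut are assumptions):

def Block(net, name, nb_filters, stride):
    split = net.add(layer.Split(name + '-split', 2))
    if stride > 1:
        # Projection shortcut: a 1x1 conv matches shape when downsampling.
        net.add(layer.Conv2D(name + '-br2-conv', nb_filters, 1, stride,
                             pad=0), split)
        br2 = net.add(layer.BatchNormalization(name + '-br2-bn'))
    else:
        br2 = split  # identity shortcut
    # Main branch: two 3x3 convolutions, each with batch norm.
    net.add(layer.Conv2D(name + '-br1-conv1', nb_filters, 3, stride, pad=1),
            split)
    net.add(layer.BatchNormalization(name + '-br1-bn1'))
    net.add(layer.Activation(name + '-br1-relu'))
    net.add(layer.Conv2D(name + '-br1-conv2', nb_filters, 3, 1, pad=1))
    br1 = net.add(layer.BatchNormalization(name + '-br1-bn2'))
    # Element-wise addition of the two branches.
    net.add(layer.Merge(name + '-merge'), [br1, br2])
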
Example #6
def create_net(in_shape, hyperpara, use_cpu=False):
    if use_cpu:
        layer.engine = 'singacpp'

    height, width, kernel_y, kernel_x, stride_y, stride_x = hyperpara[:6]
    print("kernel_x: ", kernel_x)
    print("stride_x: ", stride_x)
    net = myffnet.ProbFeedForwardNet(loss.SoftmaxCrossEntropy(), metric.Accuracy())
    net.add(layer.Conv2D('conv1', 100, kernel=(kernel_y, kernel_x), stride=(stride_y, stride_x), pad=(0, 0),
                         input_sample_shape=(int(in_shape[0]), int(in_shape[1]), int(in_shape[2]))))
    net.add(layer.Activation('relu1'))
    net.add(layer.MaxPooling2D('pool1', 2, 1, pad=0))
    net.add(layer.Flatten('flat'))
    net.add(layer.Dense('dense', 2))

    for (pname, pvalue) in zip(net.param_names(), net.param_values()):
        if len(pvalue.shape) > 1:
            initializer.gaussian(pvalue, pvalue.shape[0], pvalue.shape[1])
        else:
            pvalue.set_value(0)
        print(pname, pvalue.l1())
    return net
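
A sketch of calling this builder; ProbFeedForwardNet comes from the author's custom myffnet module, and the shapes/hyperparameters below are placeholders, not values from the original code:

# hyperpara packs [height, width, kernel_y, kernel_x, stride_y, stride_x]
in_shape = (1, 28, 28)
hyperpara = [28, 28, 3, 3, 1, 1]
net = create_net(in_shape, hyperpara, use_cpu=True)
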
Example #7
    def build_net(self):
        if self.use_cpu:
            layer.engine = 'singacpp'
        else:
            layer.engine = 'cudnn'
        self.net = ffnet.FeedForwardNet(loss.SoftmaxCrossEntropy(),
                                        metric.Accuracy())
        self.net.add(
            Reshape('reshape1', (self.vocab_size, ),
                    input_sample_shape=(self.maxlen, self.vocab_size)))
        self.net.add(layer.Dense('embed',
                                 self.embed_size))  # output: (embed_size, )
        self.net.add(layer.Dropout('dropout'))
        self.net.add(Reshape('reshape2', (1, self.maxlen, self.embed_size)))
        self.net.add(
            layer.Conv2D('conv',
                         self.filters, (self.kernel_size, self.embed_size),
                         border_mode='valid'))  # output: (filter, embed_size)
        if not self.use_cpu:
            self.net.add(layer.BatchNormalization('batchNorm'))
        self.net.add(layer.Activation('activ'))  # output: (filter, embed_size)
        self.net.add(layer.MaxPooling2D('max', stride=self.pool_size))
        self.net.add(layer.Flatten('flatten'))
        self.net.add(layer.Dense('dense', 2))
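
Note that Reshape is used unqualified here; presumably it was imported directly from the layer module, e.g.:

from singa import layer
from singa.layer import Reshape
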
Example #8
def create_net(use_cpu=False):
    if use_cpu:
        layer.engine = 'singacpp'

    net = ffnet.FeedForwardNet(loss.SoftmaxCrossEntropy(), metric.Accuracy())
    W0_specs = {'init': 'gaussian', 'mean': 0, 'std': 0.0001}
    W1_specs = {'init': 'gaussian', 'mean': 0, 'std': 0.01}
    W2_specs = {'init': 'gaussian', 'mean': 0, 'std': 0.01, 'decay_mult': 250}

    b_specs = {'init': 'constant', 'value': 0, 'lr_mult': 2, 'decay_mult': 0}
    net.add(
        layer.Conv2D('conv1',
                     32,
                     5,
                     1,
                     W_specs=W0_specs.copy(),
                     b_specs=b_specs.copy(),
                     pad=2,
                     input_sample_shape=(
                         3,
                         32,
                         32,
                     )))
    net.add(layer.MaxPooling2D('pool1', 3, 2, pad=1))
    net.add(layer.Activation('relu1'))
    net.add(layer.LRN(name='lrn1', size=3, alpha=5e-5))
    net.add(
        layer.Conv2D('conv2',
                     32,
                     5,
                     1,
                     W_specs=W1_specs.copy(),
                     b_specs=b_specs.copy(),
                     pad=2))
    net.add(layer.Activation('relu2'))
    net.add(layer.AvgPooling2D('pool2', 3, 2, pad=1))
    net.add(layer.LRN('lrn2', size=3, alpha=5e-5))
    net.add(
        layer.Conv2D('conv3',
                     64,
                     5,
                     1,
                     W_specs=W1_specs.copy(),
                     b_specs=b_specs.copy(),
                     pad=2))
    net.add(layer.Activation('relu3'))
    net.add(layer.AvgPooling2D('pool3', 3, 2, pad=1))
    net.add(layer.Flatten('flat'))
    net.add(
        layer.Dense('dense',
                    10,
                    W_specs=W2_specs.copy(),
                    b_specs=b_specs.copy()))
    for (p, specs) in zip(net.param_values(), net.param_specs()):
        filler = specs.filler
        if filler.type == 'gaussian':
            p.gaussian(filler.mean, filler.std)
        else:
            p.set_value(0)
        print(specs.name, filler.type, p.l1())

    return net
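
Since the parameter specs above carry lr_mult/decay_mult, they pair naturally with SINGA's optimizer API. A minimal training-step sketch, following the pattern of SINGA's cifar10 training script (the batch data, epoch number, and learning rate are placeholders):

import numpy as np
from singa import device, optimizer, tensor
from singa.proto import core_pb2

dev = device.get_default_device()
net = create_net(use_cpu=True)
net.to_device(dev)
opt = optimizer.SGD(momentum=0.9, weight_decay=0.004)

# One step on a random placeholder batch of 32 CIFAR-sized images.
tx = tensor.Tensor((32, 3, 32, 32), dev)
ty = tensor.Tensor((32,), dev, core_pb2.kInt)
tx.copy_from_numpy(np.random.rand(32, 3, 32, 32).astype(np.float32))
ty.copy_from_numpy(np.random.randint(0, 10, 32).astype(np.int32))

grads, (l, a) = net.train(tx, ty)
for (s, p, g) in zip(net.param_specs(), net.param_values(), grads):
    opt.apply_with_lr(0, 0.001, g, p, str(s.name))  # epoch=0, lr=0.001
print('loss = %f, accuracy = %f' % (l, a))
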
Example #9
def create_net(input_shape, use_cpu=False):
    if use_cpu:
        layer.engine = 'singacpp'
    net = ffnet.FeedForwardNet(loss.SoftmaxCrossEntropy(), metric.Accuracy())

    ConvBnReLU(net, 'conv1_1', 64, input_shape)
    #net.add(layer.Dropout('drop1', 0.3))
    net.add(layer.MaxPooling2D('pool0', 2, 2, border_mode='valid'))
    ConvBnReLU(net, 'conv1_2', 128)
    net.add(layer.MaxPooling2D('pool1', 2, 2, border_mode='valid'))
    ConvBnReLU(net, 'conv2_1', 128)
    net.add(layer.Dropout('drop2_1', 0.4))
    ConvBnReLU(net, 'conv2_2', 128)
    net.add(layer.MaxPooling2D('pool2', 2, 2, border_mode='valid'))
    ConvBnReLU(net, 'conv3_1', 256)
    net.add(layer.Dropout('drop3_1', 0.4))
    ConvBnReLU(net, 'conv3_2', 256)
    net.add(layer.Dropout('drop3_2', 0.4))
    ConvBnReLU(net, 'conv3_3', 256)
    net.add(layer.MaxPooling2D('pool3', 2, 2, border_mode='valid'))
    ConvBnReLU(net, 'conv4_1', 256)
    net.add(layer.Dropout('drop4_1', 0.4))
    ConvBnReLU(net, 'conv4_2', 256)
    net.add(layer.Dropout('drop4_2', 0.4))
    ConvBnReLU(net, 'conv4_3', 256)
    net.add(layer.MaxPooling2D('pool4', 2, 2, border_mode='valid'))
    ConvBnReLU(net, 'conv5_1', 512)
    net.add(layer.Dropout('drop5_1', 0.4))
    ConvBnReLU(net, 'conv5_2', 512)
    net.add(layer.Dropout('drop5_2', 0.4))
    ConvBnReLU(net, 'conv5_3', 512)
    net.add(layer.MaxPooling2D('pool5', 2, 2, border_mode='valid'))
    #ConvBnReLU(net, 'conv6_1', 512)
    #net.add(layer.Dropout('drop6_1', 0.4))
    #ConvBnReLU(net, 'conv6_2', 512)
    #net.add(layer.Dropout('drop6_2', 0.4))
    #ConvBnReLU(net, 'conv6_3', 512)
    #net.add(layer.MaxPooling2D('pool6', 2, 2, border_mode='valid'))
    #ConvBnReLU(net, 'conv7_1', 512)
    #net.add(layer.Dropout('drop7_1', 0.4))
    #ConvBnReLU(net, 'conv7_2', 512)
    #net.add(layer.Dropout('drop7_2', 0.4))
    #ConvBnReLU(net, 'conv7_3', 512)
    #net.add(layer.MaxPooling2D('pool7', 2, 2, border_mode='valid'))

    net.add(layer.Flatten('flat'))

    net.add(layer.Dense('ip1', 256))
    net.add(layer.BatchNormalization('bn1'))
    net.add(layer.Activation('relu1'))
    net.add(layer.Dropout('dropout1', 0.2))

    net.add(layer.Dense('ip2', 16))
    net.add(layer.BatchNormalization('bn2'))
    net.add(layer.Activation('relu2'))
    net.add(layer.Dropout('dropout2', 0.2))

    net.add(layer.Dense('ip3', 2))

    print('Parameter initialization............')
    for (p, name) in zip(net.param_values(), net.param_names()):
        print(name, p.shape)
        if 'mean' in name or 'beta' in name:
            p.set_value(0.0)
        elif 'var' in name:
            p.set_value(1.0)
        elif 'gamma' in name:
            initializer.uniform(p, 0, 1)
        elif len(p.shape) > 1:
            if 'conv' in name:
                initializer.gaussian(p, 0, p.size())
            else:
                p.gaussian(0, 0.02)
        else:
            p.set_value(0)
        print(name, p.l1())

    return net
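
Evaluation uses the same tensor plumbing; a short sketch, assuming FeedForwardNet.evaluate returns the average loss and metric over the batch (the input shape and labels are placeholders; 64x64 inputs survive the six pooling layers above):

import numpy as np
from singa import device, tensor
from singa.proto import core_pb2

dev = device.get_default_device()
net = create_net((3, 64, 64), use_cpu=True)
net.to_device(dev)

x = tensor.Tensor((4, 3, 64, 64), dev)
x.copy_from_numpy(np.random.rand(4, 3, 64, 64).astype(np.float32))
y = tensor.Tensor((4,), dev, core_pb2.kInt)
y.copy_from_numpy(np.random.randint(0, 2, 4).astype(np.int32))
l, a = net.evaluate(x, y)
print('loss = %f, accuracy = %f' % (l, a))
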