Example #1
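    # Shape test for layer.Flatten (assuming `from singa import layer` and a
    # check_shape helper on the enclosing test case): by default all sample
    # dims are flattened into one; with axis=2 only the dims from axis 2
    # onward are kept, giving (12,).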
    def test_flatten(self):
        input_sample_shape = (64, 1, 12)
        flatten = layer.Flatten('flat', input_sample_shape=input_sample_shape)
        out_sample_shape = flatten.get_output_sample_shape()
        self.check_shape(out_sample_shape, (64 * 1 * 12, ))

        flatten = layer.Flatten('flat', axis=2,
                                input_sample_shape=input_sample_shape)
        out_sample_shape = flatten.get_output_sample_shape()
        self.check_shape(out_sample_shape, (12,))
Example #2
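 # AlexNet built with SINGA's layer API; assumes `from singa import layer`
 # and a Model base class that this __init__ extends via super().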
 def __init__(self, num_classes=10, num_channels=1):
     super(AlexNet, self).__init__()
     self.num_classes = num_classes
     self.input_size = 224
     self.dimension = 4
     self.conv1 = layer.Conv2d(num_channels, 64, 11, stride=4, padding=2)
     self.conv2 = layer.Conv2d(64, 192, 5, padding=2)
     self.conv3 = layer.Conv2d(192, 384, 3, padding=1)
     self.conv4 = layer.Conv2d(384, 256, 3, padding=1)
     self.conv5 = layer.Conv2d(256, 256, 3, padding=1)
     self.linear1 = layer.Linear(4096)
     self.linear2 = layer.Linear(4096)
     self.linear3 = layer.Linear(num_classes)
     self.pooling1 = layer.MaxPool2d(2, 2, padding=0)
     self.pooling2 = layer.MaxPool2d(2, 2, padding=0)
     self.pooling3 = layer.MaxPool2d(2, 2, padding=0)
     self.avg_pooling1 = layer.AvgPool2d(3, 2, padding=0)
     self.relu1 = layer.ReLU()
     self.relu2 = layer.ReLU()
     self.relu3 = layer.ReLU()
     self.relu4 = layer.ReLU()
     self.relu5 = layer.ReLU()
     self.relu6 = layer.ReLU()
     self.relu7 = layer.ReLU()
     self.flatten = layer.Flatten()
     self.dropout1 = layer.Dropout()
     self.dropout2 = layer.Dropout()
     self.softmax_cross_entropy = layer.SoftMaxCrossEntropy()
Example #3
File: resnet.py Project: zlheui/singa
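    # ResNet constructor: _make_layer (defined elsewhere in resnet.py) returns
    # a stage plus the list of layers inside it, which are then passed to
    # register_layers so their parameters are tracked.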
    def __init__(self, block, layers, num_classes=10, num_channels=3):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.num_classes = num_classes
        self.input_size = 224
        self.dimension = 4
        self.conv1 = layer.Conv2d(num_channels,
                                  64,
                                  7,
                                  stride=2,
                                  padding=3,
                                  bias=False)
        self.bn1 = layer.BatchNorm2d(64)
        self.relu = layer.ReLU()
        self.maxpool = layer.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1, layers1 = self._make_layer(block, 64, layers[0])
        self.layer2, layers2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3, layers3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4, layers4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = layer.AvgPool2d(7, stride=1)
        self.flatten = layer.Flatten()
        self.fc = layer.Linear(num_classes)
        self.softmax_cross_entropy = layer.SoftMaxCrossEntropy()

        self.register_layers(*layers1, *layers2, *layers3, *layers4)
Example #4
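    # Converts a Caffe net definition into a SINGA FeedForwardNet; assumes the
    # singa layer, loss, metric, ffnet and model_pb2 modules are imported.
    # Data, loss and metric layers are mapped specially; everything else goes
    # through the generic protobuf LayerConf path, with a Flatten inserted
    # before each InnerProduct layer.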
    def create_net(self):
        '''
        Create a SINGA net based on Caffe proto files.
            net_proto: Caffe prototxt that describes the net
            solver_proto: Caffe prototxt that describes the solver
            input_sample_shape: shape of the input data tensor
        Returns:
            a FeedForwardNet object
        '''
        caffe_net = self.read_net_proto()
        caffe_solver = None
        if self.caffe_solver_path is not None:
            caffe_solver = self.read_solver_proto()
        layer_confs = ''
        flatten_id = 0

        # If the net proto has the input shape
        if len(caffe_net.input_dim) > 0:
            self.input_sample_shape = caffe_net.input_dim
        if len(caffe_net.layer):
            layer_confs = caffe_net.layer
        elif len(caffe_net.layers):
            layer_confs = caffe_net.layers
        else:
            raise Exception('Invalid proto file!')

        net = ffnet.FeedForwardNet()
        for i in range(len(layer_confs)):
            if layer_confs[i].type == 'Data' or layer_confs[i].type == 5:
                continue
            elif layer_confs[i].type == 'Input':
                self.input_sample_shape = layer_confs[i].input_param.shape[0].dim[1:]
            elif layer_confs[i].type == 'SoftmaxWithLoss' or layer_confs[i].type == 21:
                net.loss = loss.SoftmaxCrossEntropy()
            elif layer_confs[i].type == 'EuclideanLoss' or layer_confs[i].type == 7:
                net.loss = loss.SquareError()
            elif layer_confs[i].type == 'Accuracy' or layer_confs[i].type == 1:
                net.metric = metric.Accuracy()
            else:
                strConf = layer_confs[i].SerializeToString()
                conf = model_pb2.LayerConf()
                conf.ParseFromString(strConf)
                if caffe_solver:
                    layer.engine = self.convert_engine(
                        layer_confs[i], caffe_solver.solver_mode)
                else:
                    # if caffe_solver is None, fall back to solver_mode 0 (CPU)
                    layer.engine = self.convert_engine(layer_confs[i], 0)
                lyr = layer.Layer(conf.name, conf)
                if len(net.layers) == 0:
                    print('input sample shape: ', self.input_sample_shape)
                    lyr.setup(self.input_sample_shape)
                    print(lyr.name, lyr.get_output_sample_shape())
                if layer_confs[i].type == 'InnerProduct' or layer_confs[i].type == 14:
                    net.add(layer.Flatten('flat' + str(flatten_id)))
                    flatten_id += 1
                net.add(lyr)

        return net
Example #5
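# VGG-style CIFAR net; assumes a ConvBnReLU(net, name, nb_filters[, shape])
# helper defined elsewhere in the file, plus the singa layer, loss, metric,
# initializer and net (ffnet) modules.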
def create_net(use_cpu=False):
    if use_cpu:
        layer.engine = 'singacpp'
    net = ffnet.FeedForwardNet(loss.SoftmaxCrossEntropy(), metric.Accuracy())
    ConvBnReLU(net, 'conv1_1', 64, (3, 32, 32))
    net.add(layer.Dropout('drop1', 0.3))
    ConvBnReLU(net, 'conv1_2', 64)
    net.add(layer.MaxPooling2D('pool1', 2, 2, border_mode='valid'))
    ConvBnReLU(net, 'conv2_1', 128)
    net.add(layer.Dropout('drop2_1', 0.4))
    ConvBnReLU(net, 'conv2_2', 128)
    net.add(layer.MaxPooling2D('pool2', 2, 2, border_mode='valid'))
    ConvBnReLU(net, 'conv3_1', 256)
    net.add(layer.Dropout('drop3_1', 0.4))
    ConvBnReLU(net, 'conv3_2', 256)
    net.add(layer.Dropout('drop3_2', 0.4))
    ConvBnReLU(net, 'conv3_3', 256)
    net.add(layer.MaxPooling2D('pool3', 2, 2, border_mode='valid'))
    ConvBnReLU(net, 'conv4_1', 512)
    net.add(layer.Dropout('drop4_1', 0.4))
    ConvBnReLU(net, 'conv4_2', 512)
    net.add(layer.Dropout('drop4_2', 0.4))
    ConvBnReLU(net, 'conv4_3', 512)
    net.add(layer.MaxPooling2D('pool4', 2, 2, border_mode='valid'))
    ConvBnReLU(net, 'conv5_1', 512)
    net.add(layer.Dropout('drop5_1', 0.4))
    ConvBnReLU(net, 'conv5_2', 512)
    net.add(layer.Dropout('drop5_2', 0.4))
    ConvBnReLU(net, 'conv5_3', 512)
    net.add(layer.MaxPooling2D('pool5', 2, 2, border_mode='valid'))
    net.add(layer.Flatten('flat'))
    net.add(layer.Dropout('drop_flat', 0.5))
    net.add(layer.Dense('ip1', 512))
    net.add(layer.BatchNormalization('batchnorm_ip1'))
    net.add(layer.Activation('relu_ip1'))
    net.add(layer.Dropout('drop_ip2', 0.5))
    net.add(layer.Dense('ip2', 10))
    print('Start initialization............')
    for (p, name) in zip(net.param_values(), net.param_names()):
        print(name, p.shape)
        if 'mean' in name or 'beta' in name:
            p.set_value(0.0)
        elif 'var' in name:
            p.set_value(1.0)
        elif 'gamma' in name:
            initializer.uniform(p, 0, 1)
        elif len(p.shape) > 1:
            if 'conv' in name:
                initializer.gaussian(p, 0, 3 * 3 * p.shape[0])
            else:
                p.gaussian(0, 0.02)
        else:
            p.set_value(0)
        print(name, p.l1())

    return net
Example #6
File: mnist_cnn.py Project: zlheui/singa
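 # LeNet-style MNIST CNN: two conv+pool stages followed by two linear layers;
 # assumes `from singa import layer`.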
 def __init__(self):
     self.conv1 = layer.Conv2d(1, 20, 5, padding=0)
     self.conv2 = layer.Conv2d(20, 50, 5, padding=0)
     self.linear1 = layer.Linear(4 * 4 * 50, 500)
     self.linear2 = layer.Linear(500, 10)
     self.pooling1 = layer.MaxPool2d(2, 2, padding=0)
     self.pooling2 = layer.MaxPool2d(2, 2, padding=0)
     self.relu1 = layer.ReLU()
     self.relu2 = layer.ReLU()
     self.relu3 = layer.ReLU()
     self.flatten = layer.Flatten()
Example #7
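# Four conv stages feeding a small dense head for 2-class output; assumes the
# singa layer, loss, metric, initializer and net (ffnet) modules.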
def create_net(input_shape, use_cpu=False):
    if use_cpu:
        layer.engine = 'singacpp'
    net = ffnet.FeedForwardNet(loss.SoftmaxCrossEntropy(), metric.Accuracy())

    net.add(
        layer.Conv2D('conv1',
                     nb_kernels=32,
                     kernel=7,
                     stride=3,
                     pad=1,
                     input_sample_shape=input_shape))
    net.add(layer.Activation('relu1'))
    net.add(layer.MaxPooling2D('pool1', 2, 2, border_mode='valid'))

    net.add(layer.Conv2D('conv2', nb_kernels=64, kernel=5, stride=3))
    net.add(layer.Activation('relu2'))
    net.add(layer.MaxPooling2D('pool2', 2, 2, border_mode='valid'))

    net.add(layer.Conv2D('conv3', nb_kernels=128, kernel=3, stride=1, pad=2))
    net.add(layer.Activation('relu3'))
    net.add(layer.MaxPooling2D('pool3', 2, 2, border_mode='valid'))

    net.add(layer.Conv2D('conv4', nb_kernels=256, kernel=3, stride=1))
    net.add(layer.Activation('relu4'))
    net.add(layer.MaxPooling2D('pool4', 2, 2, border_mode='valid'))

    net.add(layer.Flatten('flat'))
    net.add(layer.Dense('ip5', 256))
    net.add(layer.Activation('relu5'))
    net.add(layer.Dense('ip6', 16))
    net.add(layer.Activation('relu6'))
    net.add(layer.Dense('ip7', 2))

    print('Parameter initialization............')
    for (p, name) in zip(net.param_values(), net.param_names()):
        print(name, p.shape)
        if 'mean' in name or 'beta' in name:
            p.set_value(0.0)
        elif 'var' in name:
            p.set_value(1.0)
        elif 'gamma' in name:
            initializer.uniform(p, 0, 1)
        elif len(p.shape) > 1:
            if 'conv' in name:
                initializer.gaussian(p, 0, p.size())
            else:
                p.gaussian(0, 0.02)
        else:
            p.set_value(0)
        print(name, p.l1())

    return net
Example #8
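    # Round-trips a small net through both snapshot and pickle serialization;
    # assumes the singa net, layer and loss modules.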
    def test_save_load(self):
        ffn = net.FeedForwardNet(loss.SoftmaxCrossEntropy())
        ffn.add(layer.Conv2D('conv', 4, 3, input_sample_shape=(3, 12, 12)))
        ffn.add(layer.Flatten('flat'))
        # ffn.add(layer.BatchNorm('bn'))
        ffn.add(layer.Dense('dense', num_output=4))
        for pname, pval in zip(ffn.param_names(), ffn.param_values()):
            pval.set_value(0.1)
        ffn.save('test_snapshot')
        ffn.save('test_pickle', use_pickle=True)

        ffn.load('test_snapshot')
        ffn.load('test_pickle', use_pickle=True)
Example #9
File: cnn.py Project: zlheui/singa
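 # MNIST CNN using fused conv+ReLU layers (activation="RELU"); the Linear
 # layers are given only their output size here, with the input size left to
 # be deduced from the incoming tensor.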
 def __init__(self, num_classes=10, num_channels=1):
     super(CNN, self).__init__()
     self.num_classes = num_classes
     self.input_size = 28
     self.dimension = 4
     self.conv1 = layer.Conv2d(num_channels, 20, 5, padding=0, activation="RELU")
     self.conv2 = layer.Conv2d(20, 50, 5, padding=0, activation="RELU")
     self.linear1 = layer.Linear(500)
     self.linear2 = layer.Linear(num_classes)
     self.pooling1 = layer.MaxPool2d(2, 2, padding=0)
     self.pooling2 = layer.MaxPool2d(2, 2, padding=0)
     self.relu = layer.ReLU()
     self.flatten = layer.Flatten()
     self.softmax_cross_entropy = layer.SoftMaxCrossEntropy()
Example #10
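# Eight ConvBnReLUPool stages followed by a small MLP head; ConvBnReLUPool is
# a helper assumed to be defined elsewhere in the file.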
def create_net(input_shape, use_cpu=False):
    if use_cpu:
        layer.engine = 'singacpp'
    net = ffnet.FeedForwardNet(loss.SoftmaxCrossEntropy(), metric.Accuracy())

    ConvBnReLUPool(net, 'conv1', 32, input_shape)
    ConvBnReLUPool(net, 'conv2', 64)
    ConvBnReLUPool(net, 'conv3', 128)
    ConvBnReLUPool(net, 'conv4', 128)
    ConvBnReLUPool(net, 'conv5', 256)
    ConvBnReLUPool(net, 'conv6', 256)
    ConvBnReLUPool(net, 'conv7', 512)
    ConvBnReLUPool(net, 'conv8', 512)

    net.add(layer.Flatten('flat'))

    net.add(layer.Dense('ip1', 256))
    net.add(layer.BatchNormalization('bn1'))
    net.add(layer.Activation('relu1'))
    net.add(layer.Dropout('dropout1', 0.2))

    net.add(layer.Dense('ip2', 16))
    net.add(layer.BatchNormalization('bn2'))
    net.add(layer.Activation('relu2'))
    net.add(layer.Dropout('dropout2', 0.2))

    net.add(layer.Dense('ip3', 2))

    print('Parameter initialization............')
    for (p, name) in zip(net.param_values(), net.param_names()):
        print(name, p.shape)
        if 'mean' in name or 'beta' in name:
            p.set_value(0.0)
        elif 'var' in name:
            p.set_value(1.0)
        elif 'gamma' in name:
            initializer.uniform(p, 0, 1)
        elif len(p.shape) > 1:
            if 'conv' in name:
                initializer.gaussian(p, 0, p.size())
            else:
                p.gaussian(0, 0.02)
        else:
            p.set_value(0)
        print(name, p.l1())

    return net
Example #11
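# ResNet-20-style CIFAR-10 net: three groups of three residual Blocks (a
# helper defined elsewhere in the file) with 16/32/64 filters, downsampling
# at the start of the second and third groups.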
def create_net(use_cpu=False):
    if use_cpu:
        layer.engine = 'singacpp'

    net = ffnet.FeedForwardNet(loss.SoftmaxCrossEntropy(), metric.Accuracy())
    net.add(
        layer.Conv2D("conv1", 16, 3, 1, pad=1, input_sample_shape=(3, 32, 32)))
    net.add(layer.BatchNormalization("bn1"))
    net.add(layer.Activation("relu1"))

    Block(net, "2a", 16, 1)
    Block(net, "2b", 16, 1)
    Block(net, "2c", 16, 1)

    Block(net, "3a", 32, 2)
    Block(net, "3b", 32, 1)
    Block(net, "3c", 32, 1)

    Block(net, "4a", 64, 2)
    Block(net, "4b", 64, 1)
    Block(net, "4c", 64, 1)

    net.add(layer.AvgPooling2D("pool4", 8, 8, border_mode='valid'))
    net.add(layer.Flatten('flat'))
    net.add(layer.Dense('ip5', 10))
    print('Start initialization............')
    for (p, name) in zip(net.param_values(), net.param_names()):
        # print(name, p.shape)
        if 'mean' in name or 'beta' in name:
            p.set_value(0.0)
        elif 'var' in name:
            p.set_value(1.0)
        elif 'gamma' in name:
            initializer.uniform(p, 0, 1)
        elif len(p.shape) > 1:
            if 'conv' in name:
                # initializer.gaussian(p, 0, math.sqrt(2.0/p.shape[1]))
                initializer.gaussian(p, 0, 9.0 * p.shape[0])
            else:
                initializer.uniform(p, p.shape[0], p.shape[1])
        else:
            p.set_value(0)
        # print(name, p.l1())

    return net
Example #12
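 # Runs one forward/backward pass by hand and checks that every parameter has
 # a gradient of matching size; assumes the singa net, layer, loss and tensor
 # modules plus numpy as np.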
 def test_train_one_batch(self):
     ffn = net.FeedForwardNet(loss.SoftmaxCrossEntropy())
     ffn.add(layer.Conv2D('conv', 4, 3, input_sample_shape=(3, 12, 12)))
     ffn.add(layer.Flatten('flat'))
     ffn.add(layer.Dense('dense', num_output=4))
     for pname, pval in zip(ffn.param_names(), ffn.param_values()):
         pval.set_value(0.1)
     x = tensor.Tensor((4, 3, 12, 12))
     x.gaussian(0, 0.01)
     y = np.asarray([[1, 0, 0], [0, 0, 1], [0, 0, 1], [0, 1, 0]],
                    dtype=np.int32)
     y = tensor.from_numpy(y)
     o = ffn.forward(True, x)
     ffn.loss.forward(True, o, y)
     g = ffn.loss.backward()
     for pname, pvalue, pgrad in ffn.backward(g):
         self.assertEqual(len(pvalue), len(pgrad))
         for p, grad in zip(pvalue, pgrad):
             self.assertEqual(p.size(), grad.size())
Example #13
File: model.py Project: lzjpaul/modeldb
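# Single-conv classifier whose kernel and stride come from a hyperparameter
# list; ProbFeedForwardNet (myffnet) is a project-local FeedForwardNet
# variant.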
def create_net(in_shape, hyperpara, use_cpu=False):
    if use_cpu:
        layer.engine = 'singacpp'

    height, width, kernel_y, kernel_x, stride_y, stride_x = hyperpara[:6]
    print("kernel_x: ", kernel_x)
    print("stride_x: ", stride_x)
    net = myffnet.ProbFeedForwardNet(loss.SoftmaxCrossEntropy(), metric.Accuracy())
    net.add(layer.Conv2D('conv1', 100, kernel=(kernel_y, kernel_x), stride=(stride_y, stride_x), pad=(0, 0),
                         input_sample_shape=(int(in_shape[0]), int(in_shape[1]), int(in_shape[2]))))
    net.add(layer.Activation('relu1'))
    net.add(layer.MaxPooling2D('pool1', 2, 1, pad=0))
    net.add(layer.Flatten('flat'))
    net.add(layer.Dense('dense', 2))

    for (pname, pvalue) in zip(net.param_names(), net.param_values()):
        if len(pvalue.shape) > 1:
            initializer.gaussian(pvalue, pvalue.shape[0], pvalue.shape[1])
        else:
            pvalue.set_value(0)
        print(pname, pvalue.l1())
    return net
Example #14
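 # Text-CNN builder: one-hot tokens are embedded through a Dense layer, then
 # reshaped so a single Conv2D can slide over the sequence; Reshape is used
 # unqualified, so it is presumably defined or imported elsewhere in the file.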
 def build_net(self):
     if self.use_cpu:
         layer.engine = 'singacpp'
     else:
         layer.engine = 'cudnn'
     self.net = ffnet.FeedForwardNet(loss.SoftmaxCrossEntropy(),
                                     metric.Accuracy())
     self.net.add(
         Reshape('reshape1', (self.vocab_size, ),
                 input_sample_shape=(self.maxlen, self.vocab_size)))
     self.net.add(layer.Dense('embed',
                              self.embed_size))  # output: (embed_size, )
     self.net.add(layer.Dropout('dropout'))
     self.net.add(Reshape('reshape2', (1, self.maxlen, self.embed_size)))
     self.net.add(
         layer.Conv2D('conv',
                      self.filters, (self.kernel_size, self.embed_size),
                      border_mode='valid'))  # output: (filter, embed_size)
     if not self.use_cpu:
         self.net.add(layer.BatchNormalization('batchNorm'))
     self.net.add(layer.Activation('activ'))  # output: (filter, embed_size)
     self.net.add(layer.MaxPooling2D('max', stride=self.pool_size))
     self.net.add(layer.Flatten('flatten'))
     self.net.add(layer.Dense('dense', 2))
Example #15
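# AlexNet-like CIFAR-10 net with explicit per-parameter initialization and
# learning-rate/decay specs (W_specs/b_specs); the init loop below fills each
# parameter from its specs.filler.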
def create_net(use_cpu=False):
    if use_cpu:
        layer.engine = 'singacpp'

    net = ffnet.FeedForwardNet(loss.SoftmaxCrossEntropy(), metric.Accuracy())
    W0_specs = {'init': 'gaussian', 'mean': 0, 'std': 0.0001}
    W1_specs = {'init': 'gaussian', 'mean': 0, 'std': 0.01}
    W2_specs = {'init': 'gaussian', 'mean': 0, 'std': 0.01, 'decay_mult': 250}

    b_specs = {'init': 'constant', 'value': 0, 'lr_mult': 2, 'decay_mult': 0}
    net.add(
        layer.Conv2D('conv1',
                     32,
                     5,
                     1,
                     W_specs=W0_specs.copy(),
                     b_specs=b_specs.copy(),
                     pad=2,
                     input_sample_shape=(3, 32, 32)))
    net.add(layer.MaxPooling2D('pool1', 3, 2, pad=1))
    net.add(layer.Activation('relu1'))
    net.add(layer.LRN(name='lrn1', size=3, alpha=5e-5))
    net.add(
        layer.Conv2D('conv2',
                     32,
                     5,
                     1,
                     W_specs=W1_specs.copy(),
                     b_specs=b_specs.copy(),
                     pad=2))
    net.add(layer.Activation('relu2'))
    net.add(layer.AvgPooling2D('pool2', 3, 2, pad=1))
    net.add(layer.LRN('lrn2', size=3, alpha=5e-5))
    net.add(
        layer.Conv2D('conv3',
                     64,
                     5,
                     1,
                     W_specs=W1_specs.copy(),
                     b_specs=b_specs.copy(),
                     pad=2))
    net.add(layer.Activation('relu3'))
    net.add(layer.AvgPooling2D('pool3', 3, 2, pad=1))
    net.add(layer.Flatten('flat'))
    net.add(
        layer.Dense('dense',
                    10,
                    W_specs=W2_specs.copy(),
                    b_specs=b_specs.copy()))
    for (p, specs) in zip(net.param_values(), net.param_specs()):
        filler = specs.filler
        if filler.type == 'gaussian':
            p.gaussian(filler.mean, filler.std)
        else:
            p.set_value(0)
        print(specs.name, filler.type, p.l1())

    return net
Example #16
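# Deeper VGG variant with a 2-class head; ConvBnReLU is a helper assumed to
# be defined elsewhere in the file. The commented-out conv6/conv7 stages are
# kept as in the source.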
def create_net(input_shape, use_cpu=False):
    if use_cpu:
        layer.engine = 'singacpp'
    net = ffnet.FeedForwardNet(loss.SoftmaxCrossEntropy(), metric.Accuracy())

    ConvBnReLU(net, 'conv1_1', 64, input_shape)
    #net.add(layer.Dropout('drop1', 0.3))
    net.add(layer.MaxPooling2D('pool0', 2, 2, border_mode='valid'))
    ConvBnReLU(net, 'conv1_2', 128)
    net.add(layer.MaxPooling2D('pool1', 2, 2, border_mode='valid'))
    ConvBnReLU(net, 'conv2_1', 128)
    net.add(layer.Dropout('drop2_1', 0.4))
    ConvBnReLU(net, 'conv2_2', 128)
    net.add(layer.MaxPooling2D('pool2', 2, 2, border_mode='valid'))
    ConvBnReLU(net, 'conv3_1', 256)
    net.add(layer.Dropout('drop3_1', 0.4))
    ConvBnReLU(net, 'conv3_2', 256)
    net.add(layer.Dropout('drop3_2', 0.4))
    ConvBnReLU(net, 'conv3_3', 256)
    net.add(layer.MaxPooling2D('pool3', 2, 2, border_mode='valid'))
    ConvBnReLU(net, 'conv4_1', 256)
    net.add(layer.Dropout('drop4_1', 0.4))
    ConvBnReLU(net, 'conv4_2', 256)
    net.add(layer.Dropout('drop4_2', 0.4))
    ConvBnReLU(net, 'conv4_3', 256)
    net.add(layer.MaxPooling2D('pool4', 2, 2, border_mode='valid'))
    ConvBnReLU(net, 'conv5_1', 512)
    net.add(layer.Dropout('drop5_1', 0.4))
    ConvBnReLU(net, 'conv5_2', 512)
    net.add(layer.Dropout('drop5_2', 0.4))
    ConvBnReLU(net, 'conv5_3', 512)
    net.add(layer.MaxPooling2D('pool5', 2, 2, border_mode='valid'))
    #ConvBnReLU(net, 'conv6_1', 512)
    #net.add(layer.Dropout('drop6_1', 0.4))
    #ConvBnReLU(net, 'conv6_2', 512)
    #net.add(layer.Dropout('drop6_2', 0.4))
    #ConvBnReLU(net, 'conv6_3', 512)
    #net.add(layer.MaxPooling2D('pool6', 2, 2, border_mode='valid'))
    #ConvBnReLU(net, 'conv7_1', 512)
    #net.add(layer.Dropout('drop7_1', 0.4))
    #ConvBnReLU(net, 'conv7_2', 512)
    #net.add(layer.Dropout('drop7_2', 0.4))
    #ConvBnReLU(net, 'conv7_3', 512)
    #net.add(layer.MaxPooling2D('pool7', 2, 2, border_mode='valid'))

    net.add(layer.Flatten('flat'))

    net.add(layer.Dense('ip1', 256))
    net.add(layer.BatchNormalization('bn1'))
    net.add(layer.Activation('relu1'))
    net.add(layer.Dropout('dropout1', 0.2))

    net.add(layer.Dense('ip2', 16))
    net.add(layer.BatchNormalization('bn2'))
    net.add(layer.Activation('relu2'))
    net.add(layer.Dropout('dropout2', 0.2))

    net.add(layer.Dense('ip3', 2))

    print('Parameter initialization............')
    for (p, name) in zip(net.param_values(), net.param_names()):
        print(name, p.shape)
        if 'mean' in name or 'beta' in name:
            p.set_value(0.0)
        elif 'var' in name:
            p.set_value(1.0)
        elif 'gamma' in name:
            initializer.uniform(p, 0, 1)
        elif len(p.shape) > 1:
            if 'conv' in name:
                initializer.gaussian(p, 0, p.size())
            else:
                p.gaussian(0, 0.02)
        else:
            p.set_value(0)
        print(name, p.l1())

    return net
Example #17
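    # Xception constructor: entry flow (blocks 1-3), middle flow (blocks
    # 4-11), exit flow (block 12 plus two SeparableConv2d layers); Block is
    # defined elsewhere in the file.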
    def __init__(self, num_classes=10, num_channels=3):
        """ Constructor
        Args:
            num_classes: number of classes
        """
        super(Xception, self).__init__()
        self.num_classes = num_classes
        self.input_size = 299
        self.dimension = 4

        self.conv1 = layer.Conv2d(num_channels, 32, 3, 2, 0, bias=False)
        self.bn1 = layer.BatchNorm2d(32)
        self.relu1 = layer.ReLU()

        self.conv2 = layer.Conv2d(32, 64, 3, 1, 1, bias=False)
        self.bn2 = layer.BatchNorm2d(64)
        self.relu2 = layer.ReLU()
        # do relu here

        self.block1 = Block(64,
                            128,
                            2,
                            2,
                            padding=0,
                            start_with_relu=False,
                            grow_first=True)
        self.block2 = Block(128,
                            256,
                            2,
                            2,
                            padding=0,
                            start_with_relu=True,
                            grow_first=True)
        self.block3 = Block(256,
                            728,
                            2,
                            2,
                            padding=0,
                            start_with_relu=True,
                            grow_first=True)

        self.block4 = Block(728,
                            728,
                            3,
                            1,
                            start_with_relu=True,
                            grow_first=True)
        self.block5 = Block(728,
                            728,
                            3,
                            1,
                            start_with_relu=True,
                            grow_first=True)
        self.block6 = Block(728,
                            728,
                            3,
                            1,
                            start_with_relu=True,
                            grow_first=True)
        self.block7 = Block(728,
                            728,
                            3,
                            1,
                            start_with_relu=True,
                            grow_first=True)

        self.block8 = Block(728,
                            728,
                            3,
                            1,
                            start_with_relu=True,
                            grow_first=True)
        self.block9 = Block(728,
                            728,
                            3,
                            1,
                            start_with_relu=True,
                            grow_first=True)
        self.block10 = Block(728,
                             728,
                             3,
                             1,
                             start_with_relu=True,
                             grow_first=True)
        self.block11 = Block(728,
                             728,
                             3,
                             1,
                             start_with_relu=True,
                             grow_first=True)

        self.block12 = Block(728,
                             1024,
                             2,
                             2,
                             start_with_relu=True,
                             grow_first=False)

        self.conv3 = layer.SeparableConv2d(1024, 1536, 3, 1, 1)
        self.bn3 = layer.BatchNorm2d(1536)
        self.relu3 = layer.ReLU()

        # do relu here
        self.conv4 = layer.SeparableConv2d(1536, 2048, 3, 1, 1)
        self.bn4 = layer.BatchNorm2d(2048)

        self.relu4 = layer.ReLU()
        self.globalpooling = layer.MaxPool2d(10, 1)
        self.flatten = layer.Flatten()
        self.fc = layer.Linear(num_classes)

        self.softmax_cross_entropy = layer.SoftMaxCrossEntropy()