Example #1
def create_net(input_shape, use_cpu=False):
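    # Build a small CNN for 2-class classification: four conv/relu/pool stages
    # followed by three dense layers. Setting layer.engine to 'singacpp' runs
    # the layers on CPU (SINGA's default engine is 'cudnn').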
    if use_cpu:
        layer.engine = 'singacpp'
    net = ffnet.FeedForwardNet(loss.SoftmaxCrossEntropy(), metric.Accuracy())

    net.add(
        layer.Conv2D('conv1',
                     nb_kernels=32,
                     kernel=7,
                     stride=3,
                     pad=1,
                     input_sample_shape=input_shape))
    net.add(layer.Activation('relu1'))
    net.add(layer.MaxPooling2D('pool1', 2, 2, border_mode='valid'))

    net.add(layer.Conv2D('conv2', nb_kernels=64, kernel=5, stride=3))
    net.add(layer.Activation('relu2'))
    net.add(layer.MaxPooling2D('pool2', 2, 2, border_mode='valid'))

    net.add(layer.Conv2D('conv3', nb_kernels=128, kernel=3, stride=1, pad=2))
    net.add(layer.Activation('relu3'))
    net.add(layer.MaxPooling2D('pool3', 2, 2, border_mode='valid'))

    net.add(layer.Conv2D('conv4', nb_kernels=256, kernel=3, stride=1))
    net.add(layer.Activation('relu4'))
    net.add(layer.MaxPooling2D('pool4', 2, 2, border_mode='valid'))

    net.add(layer.Flatten('flat'))
    net.add(layer.Dense('ip5', 256))
    net.add(layer.Activation('relu5'))
    net.add(layer.Dense('ip6', 16))
    net.add(layer.Activation('relu6'))
    net.add(layer.Dense('ip7', 2))

    print('Parameter initialization............')
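    # Heuristic initialization: BN means/betas -> 0, variances -> 1, gammas
    # uniform; conv weights get a fan-scaled Gaussian via the initializer,
    # other weight matrices a direct N(0, 0.02); everything else (biases) -> 0.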
    for (p, name) in zip(net.param_values(), net.param_names()):
        print(name, p.shape)
        if 'mean' in name or 'beta' in name:
            p.set_value(0.0)
        elif 'var' in name:
            p.set_value(1.0)
        elif 'gamma' in name:
            initializer.uniform(p, 0, 1)
        elif len(p.shape) > 1:
            if 'conv' in name:
                initializer.gaussian(p, 0, p.size())
            else:
                p.gaussian(0, 0.02)
        else:
            p.set_value(0)
        print(name, p.l1())

    return net
Example #2
def Block(net, name, nb_filters, stride):
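    # A residual block. Branch 1 applies conv-BN-ReLU-conv-BN; when stride > 1
    # the input is downsampled, so branch 2 adds a 1x1 strided projection conv
    # plus BN to match shapes. Merge sums the two branches (an identity
    # shortcut when stride == 1).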
    split = net.add(layer.Split(name + "-split", 2))
    if stride > 1:
        net.add(layer.Conv2D(name + "-br2-conv", nb_filters, 1, stride, pad=0),
                split)
        br2bn = net.add(layer.BatchNormalization(name + "-br2-bn"))
    net.add(layer.Conv2D(name + "-br1-conv1", nb_filters, 3, stride, pad=1),
            split)
    net.add(layer.BatchNormalization(name + "-br1-bn1"))
    net.add(layer.Activation(name + "-br1-relu"))
    net.add(layer.Conv2D(name + "-br1-conv2", nb_filters, 3, 1, pad=1))
    br1bn2 = net.add(layer.BatchNormalization(name + "-br1-bn2"))
    if stride > 1:
        net.add(layer.Merge(name + "-merge"), [br1bn2, br2bn])
    else:
        net.add(layer.Merge(name + "-merge"), [br1bn2, split])
Example #3
def ConvBnReLU(net, name, nb_filers, sample_shape=None):
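    # Append a conv(3x3, stride 1, pad 1) -> batch-norm -> ReLU triple to net.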
    net.add(
        layer.Conv2D(name + '_1',
                     nb_filers,
                     3,
                     1,
                     pad=1,
                     input_sample_shape=sample_shape))
    net.add(layer.BatchNormalization(name + '_2'))
    net.add(layer.Activation(name + '_3'))
Example #4
    def test_conv2D_shape(self):
        in_sample_shape = (3, 224, 224)
        conv = layer.Conv2D('conv',
                            64,
                            3,
                            1,
                            W_specs=self.w,
                            b_specs=self.b,
                            input_sample_shape=in_sample_shape)
        out_sample_shape = conv.get_output_sample_shape()
        self.check_shape(out_sample_shape, (64, 224, 224))
Example #5
def ConvBnReLUPool(net, name, nb_filers, sample_shape=None):
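    # Same conv -> batch-norm -> ReLU triple as above, followed by 2x2 max-pooling.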
    net.add(
        layer.Conv2D(name + '_conv',
                     nb_filers,
                     3,
                     1,
                     pad=1,
                     input_sample_shape=sample_shape))
    net.add(layer.BatchNormalization(name + '_bn'))
    net.add(layer.Activation(name + '_relu'))
    net.add(layer.MaxPooling2D(name + '_pool', 2, 2, border_mode='valid'))
Example #6
    def test_save_load(self):
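        # Round-trip check: save the net in both the snapshot and the pickle
        # formats, then reload each one.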
        ffn = net.FeedForwardNet(loss.SoftmaxCrossEntropy())
        ffn.add(layer.Conv2D('conv', 4, 3, input_sample_shape=(3, 12, 12)))
        ffn.add(layer.Flatten('flat'))
        # ffn.add(layer.BatchNorm('bn'))
        ffn.add(layer.Dense('dense', num_output=4))
        for pname, pval in zip(ffn.param_names(), ffn.param_values()):
            pval.set_value(0.1)
        ffn.save('test_snapshot')
        ffn.save('test_pickle', use_pickle=True)

        ffn.load('test_snapshot')
        ffn.load('test_pickle', use_pickle=True)
Example #7
def create_net(use_cpu=False):
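    # A CIFAR-10 residual net: a 3x3 stem conv, three stages of three residual
    # blocks (16, 32, 64 filters; stages 3 and 4 downsample with stride 2),
    # 8x8 average pooling, and a 10-way dense classifier.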
    if use_cpu:
        layer.engine = 'singacpp'

    net = ffnet.FeedForwardNet(loss.SoftmaxCrossEntropy(), metric.Accuracy())
    net.add(
        layer.Conv2D("conv1", 16, 3, 1, pad=1, input_sample_shape=(3, 32, 32)))
    net.add(layer.BatchNormalization("bn1"))
    net.add(layer.Activation("relu1"))

    Block(net, "2a", 16, 1)
    Block(net, "2b", 16, 1)
    Block(net, "2c", 16, 1)

    Block(net, "3a", 32, 2)
    Block(net, "3b", 32, 1)
    Block(net, "3c", 32, 1)

    Block(net, "4a", 64, 2)
    Block(net, "4b", 64, 1)
    Block(net, "4c", 64, 1)

    net.add(layer.AvgPooling2D("pool4", 8, 8, border_mode='valid'))
    net.add(layer.Flatten('flat'))
    net.add(layer.Dense('ip5', 10))
    print('Start initialization............')
    for (p, name) in zip(net.param_values(), net.param_names()):
        # print name, p.shape
        if 'mean' in name or 'beta' in name:
            p.set_value(0.0)
        elif 'var' in name:
            p.set_value(1.0)
        elif 'gamma' in name:
            initializer.uniform(p, 0, 1)
        elif len(p.shape) > 1:
            if 'conv' in name:
                # initializer.gaussian(p, 0, math.sqrt(2.0/p.shape[1]))
                initializer.gaussian(p, 0, 9.0 * p.shape[0])
            else:
                initializer.uniform(p, p.shape[0], p.shape[1])
        else:
            p.set_value(0)
        # print name, p.l1()

    return net
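A minimal forward-pass sketch for the net above (assuming the usual `from singa import tensor` import and the `FeedForwardNet.forward` API used in Example #8):

from singa import tensor

net = create_net(use_cpu=True)
x = tensor.Tensor((8, 3, 32, 32))  # a batch of 8 CIFAR-10-sized inputs
x.gaussian(0, 0.02)                # fill with random data
out = net.forward(True, x)         # True selects the training-phase forward pass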
Example #8
    def test_train_one_batch(self):
        ffn = net.FeedForwardNet(loss.SoftmaxCrossEntropy())
        ffn.add(layer.Conv2D('conv', 4, 3, input_sample_shape=(3, 12, 12)))
        ffn.add(layer.Flatten('flat'))
        ffn.add(layer.Dense('dense', num_output=4))
        for pname, pval in zip(ffn.param_names(), ffn.param_values()):
            pval.set_value(0.1)
        x = tensor.Tensor((4, 3, 12, 12))
        x.gaussian(0, 0.01)
        y = np.asarray([[1, 0, 0], [0, 0, 1], [0, 0, 1], [0, 1, 0]],
                       dtype=np.int32)
        y = tensor.from_numpy(y)
        o = ffn.forward(True, x)
        ffn.loss.forward(True, o, y)
        g = ffn.loss.backward()
        # each item from ffn.backward pairs a list of parameter tensors with a
        # list of their gradients; sizes must match one-to-one
        for pname, pvalue, pgrad in ffn.backward(g):
            self.assertEqual(len(pvalue), len(pgrad))
            for p, pg in zip(pvalue, pgrad):
                self.assertEqual(p.size(), pg.size())
Example #9
def create_net(in_shape, hyperpara, use_cpu=False):
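    # A one-conv-layer classifier; the kernel and stride geometry of the conv
    # comes from the hyperpara list.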
    if use_cpu:
        layer.engine = 'singacpp'

    height, width, kernel_y, kernel_x, stride_y, stride_x = hyperpara[:6]
    print("kernel_x:", kernel_x)
    print("stride_x:", stride_x)
    net = myffnet.ProbFeedForwardNet(loss.SoftmaxCrossEntropy(), metric.Accuracy())
    net.add(layer.Conv2D('conv1', 100, kernel=(kernel_y, kernel_x), stride=(stride_y, stride_x), pad=(0, 0),
                         input_sample_shape=(int(in_shape[0]), int(in_shape[1]), int(in_shape[2]))))
    net.add(layer.Activation('relu1'))
    net.add(layer.MaxPooling2D('pool1', 2, 1, pad=0))
    net.add(layer.Flatten('flat'))
    net.add(layer.Dense('dense', 2))

    for (pname, pvalue) in zip(net.param_names(), net.param_values()):
        if len(pvalue.shape) > 1:
            initializer.gaussian(pvalue, pvalue.shape[0], pvalue.shape[1])
        else:
            pvalue.set_value(0)
        print(pname, pvalue.l1())
    return net
Example #10
    def build_net(self):
        # A text-classification CNN: one-hot tokens are embedded via a Dense
        # layer, a single Conv2D slides over the (maxlen, embed_size) map, and
        # max-pooling plus a final Dense layer give the 2-way prediction.
        if self.use_cpu:
            layer.engine = 'singacpp'
        else:
            layer.engine = 'cudnn'
        self.net = ffnet.FeedForwardNet(loss.SoftmaxCrossEntropy(),
                                        metric.Accuracy())
        self.net.add(
            Reshape('reshape1', (self.vocab_size, ),
                    input_sample_shape=(self.maxlen, self.vocab_size)))
        self.net.add(layer.Dense('embed',
                                 self.embed_size))  # output: (embed_size, )
        self.net.add(layer.Dropout('dropout'))
        self.net.add(Reshape('reshape2', (1, self.maxlen, self.embed_size)))
        self.net.add(
            layer.Conv2D('conv',
                         self.filters, (self.kernel_size, self.embed_size),
                         border_mode='valid'))  # output: (filter, embed_size)
        if not self.use_cpu:
            self.net.add(layer.BatchNormalization('batchNorm'))
        self.net.add(layer.Activation('activ'))  # output: (filter, embed_size)
        self.net.add(layer.MaxPooling2D('max', stride=self.pool_size))
        self.net.add(layer.Flatten('flatten'))
        self.net.add(layer.Dense('dense', 2))
Example #11
def create_net(use_cpu=False):
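    # Three conv stages (with pooling and local response normalization) plus a
    # 10-way dense classifier; per-parameter filler, learning-rate, and
    # weight-decay settings are passed through W_specs/b_specs.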
    if use_cpu:
        layer.engine = 'singacpp'

    net = ffnet.FeedForwardNet(loss.SoftmaxCrossEntropy(), metric.Accuracy())
    W0_specs = {'init': 'gaussian', 'mean': 0, 'std': 0.0001}
    W1_specs = {'init': 'gaussian', 'mean': 0, 'std': 0.01}
    W2_specs = {'init': 'gaussian', 'mean': 0, 'std': 0.01, 'decay_mult': 250}

    b_specs = {'init': 'constant', 'value': 0, 'lr_mult': 2, 'decay_mult': 0}
    net.add(
        layer.Conv2D('conv1',
                     32,
                     5,
                     1,
                     W_specs=W0_specs.copy(),
                     b_specs=b_specs.copy(),
                     pad=2,
                     input_sample_shape=(
                         3,
                         32,
                         32,
                     )))
    net.add(layer.MaxPooling2D('pool1', 3, 2, pad=1))
    net.add(layer.Activation('relu1'))
    net.add(layer.LRN(name='lrn1', size=3, alpha=5e-5))
    net.add(
        layer.Conv2D('conv2',
                     32,
                     5,
                     1,
                     W_specs=W1_specs.copy(),
                     b_specs=b_specs.copy(),
                     pad=2))
    net.add(layer.Activation('relu2'))
    net.add(layer.AvgPooling2D('pool2', 3, 2, pad=1))
    net.add(layer.LRN('lrn2', size=3, alpha=5e-5))
    net.add(
        layer.Conv2D('conv3',
                     64,
                     5,
                     1,
                     W_specs=W1_specs.copy(),
                     b_specs=b_specs.copy(),
                     pad=2))
    net.add(layer.Activation('relu3'))
    net.add(layer.AvgPooling2D('pool3', 3, 2, pad=1))
    net.add(layer.Flatten('flat'))
    net.add(
        layer.Dense('dense',
                    10,
                    W_specs=W2_specs.copy(),
                    b_specs=b_specs.copy()))
    for (p, specs) in zip(net.param_values(), net.param_specs()):
        filler = specs.filler
        if filler.type == 'gaussian':
            p.gaussian(filler.mean, filler.std)
        else:
            p.set_value(0)
        print(specs.name, filler.type, p.l1())

    return net
Example #12
    def test_conv2D_forward_backward(self):
        in_sample_shape = (1, 3, 3)
        conv = layer.Conv2D('conv',
                            1,
                            3,
                            2,
                            W_specs=self.w,
                            b_specs=self.b,
                            pad=1,
                            input_sample_shape=in_sample_shape)
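        # With a 3x3 input, 3x3 kernel, stride 2 and pad 1, the output is 2x2.
        # The checks below verify every forward value and every dx/dw entry by
        # hand against the convolution definition.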
        # cuda = device.create_cuda_gpu()
        # conv.to_device(cuda)
        params = conv.param_values()

        raw_x = np.arange(9, dtype=np.float32) + 1
        x = tensor.from_numpy(raw_x)
        x.reshape((1, 1, 3, 3))
        w = np.array([1, 1, 0, 0, 0, -1, 0, 1, 0], dtype=np.float32)
        params[0].copy_from_numpy(w)
        params[1].set_value(1.0)

        # x.to_device(cuda)
        y = conv.forward(model_pb2.kTrain, x)
        # y.to_host()
        npy = tensor.to_numpy(y).flatten()

        self.assertAlmostEqual(3.0, npy[0])
        self.assertAlmostEqual(7.0, npy[1])
        self.assertAlmostEqual(-3.0, npy[2])
        self.assertAlmostEqual(12.0, npy[3])

        dy = np.asarray([0.1, 0.2, 0.3, 0.4],
                        dtype=np.float32).reshape(y.shape)
        grad = tensor.from_numpy(dy)
        # grad.to_device(cuda)
        (dx, [dw, db]) = conv.backward(model_pb2.kTrain, grad)
        dx.to_host()
        dw.to_host()
        dx = tensor.to_numpy(dx).flatten()
        dw = tensor.to_numpy(dw).flatten()
        dy = dy.flatten()
        self.assertAlmostEqual(dy[0] * w[4], dx[0])
        self.assertAlmostEqual(dy[0] * w[5] + dy[1] * w[3], dx[1])
        self.assertAlmostEqual(dy[1] * w[4], dx[2])
        self.assertAlmostEqual(dy[0] * w[7] + dy[2] * w[1], dx[3])
        self.assertAlmostEqual(
            dy[0] * w[8] + dy[1] * w[6] + dy[2] * w[2] + dy[3] * w[0], dx[4])
        self.assertAlmostEqual(dy[1] * w[7] + dy[3] * w[1], dx[5])
        self.assertAlmostEqual(dy[2] * w[4], dx[6])
        self.assertAlmostEqual(dy[2] * w[5] + dy[3] * w[3], dx[7])
        self.assertAlmostEqual(dy[3] * w[4], dx[8])

        self.assertAlmostEqual(dy[3] * raw_x[4], dw[0])
        self.assertAlmostEqual(dy[3] * raw_x[5] + dy[2] * raw_x[3], dw[1])
        self.assertAlmostEqual(dy[2] * raw_x[4], dw[2])
        self.assertAlmostEqual(dy[1] * raw_x[1] + dy[3] * raw_x[7], dw[3])
        self.assertAlmostEqual(
            dy[0] * raw_x[0] + dy[1] * raw_x[2] + dy[2] * raw_x[6] +
            dy[3] * raw_x[8], dw[4], 5)
        self.assertAlmostEqual(dy[0] * raw_x[1] + dy[2] * raw_x[7], dw[5])
        self.assertAlmostEqual(dy[1] * raw_x[4], dw[6])
        self.assertAlmostEqual(dy[0] * raw_x[3] + dy[1] * raw_x[5], dw[7])
        self.assertAlmostEqual(dy[0] * raw_x[4], dw[8])