Example #1
def create_resnet(depth=18):
    '''Original resnet, where there is a ReLU after the addition layer'''
    net = ffnet.FeedForwardNet()
    net.add(
        Conv2D('input-conv',
               64,
               7,
               2,
               pad=3,
               use_bias=False,
               input_sample_shape=(3, 224, 224)))
    net.add(BatchNormalization('input-bn'))
    net.add(Activation('input_relu'))
    net.add(MaxPooling2D('input_pool', 3, 2, pad=1))
    conf = cfg[depth]
    if depth > 34:
        stage(0, net, conf[0], 64, 64, 256, 1, bottleneck)
        stage(1, net, conf[1], 256, 128, 512, 2, bottleneck)
        stage(2, net, conf[2], 512, 256, 1024, 2, bottleneck)
        stage(3, net, conf[3], 1024, 512, 2048, 2, bottleneck)
    else:
        stage(0, net, conf[0], 64, 64, 64, 1, basicblock)
        stage(1, net, conf[1], 64, 128, 128, 2, basicblock)
        stage(2, net, conf[2], 128, 256, 256, 2, basicblock)
        stage(3, net, conf[3], 256, 512, 512, 2, basicblock)
    net.add(AvgPooling2D('avg', 7, 1, pad=0))
    net.add(Flatten('flat'))
    net.add(Dense('dense', 1000))
    return net
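A minimal usage sketch for the builder above; it relies only on FeedForwardNet methods already used throughout these examples, and the depth value is an illustration:

# Sketch: build a ResNet-50 trunk and list its parameter shapes.
net = create_resnet(depth=50)   # depth > 34 selects the bottleneck branch
for name, val in zip(net.param_names(), net.param_values()):
    print(name, val.shape)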
Example #2
def create_preact_resnet(depth=200):
    '''Resnet with the batchnorm and relu moved to before the conv layer for each block'''
    net = ffnet.FeedForwardNet()
    net.add(
        Conv2D('input-conv',
               64,
               7,
               2,
               pad=3,
               use_bias=False,
               input_sample_shape=(3, 224, 224)))
    net.add(BatchNormalization('input-bn'))
    net.add(Activation('input_relu'))
    net.add(MaxPooling2D('input_pool', 3, 2, pad=1))
    conf = cfg[depth]
    if depth > 34:
        stage(0, net, conf[0], 64, 64, 256, 1, bottleneck, preact=True)
        stage(1, net, conf[1], 256, 128, 512, 2, bottleneck, preact=True)
        stage(2, net, conf[2], 512, 256, 1024, 2, bottleneck, preact=True)
        stage(3, net, conf[3], 1024, 512, 2048, 2, bottleneck, preact=True)
    else:
        stage(0, net, conf[0], 64, 64, 64, 1, basicblock, preact=True)
        stage(1, net, conf[1], 64, 128, 128, 2, basicblock, preact=True)
        stage(2, net, conf[2], 128, 256, 256, 2, basicblock, preact=True)
        stage(3, net, conf[3], 256, 512, 512, 2, basicblock, preact=True)
    net.add(BatchNormalization('final-bn'))
    net.add(Activation('final-relu'))
    net.add(AvgPooling2D('avg', 7, 1, pad=0))
    net.add(Flatten('flat'))
    net.add(Dense('dense', 1000))
    return net
Example #3
    def create_net(self):
        '''
        Create a SINGA net based on Caffe proto files.
            net_proto: Caffe prototxt that describes the net
            solver_proto: Caffe prototxt that describes the solver
            input_sample_shape: shape of the input data tensor
        return:
            a FeedForwardNet object
        '''
        caffe_net = self.read_net_proto()
        caffe_solver = None
        if self.caffe_solver_path is not None:
            caffe_solver = self.read_solver_proto()
        layer_confs = []
        flatten_id = 0

        # If the net proto has the input shape
        if len(caffe_net.input_dim) > 0:
            self.input_sample_shape = caffe_net.input_dim
        if len(caffe_net.layer):
            layer_confs = caffe_net.layer
        elif len(caffe_net.layers):
            layer_confs = caffe_net.layers
        else:
            raise Exception('Invalid proto file!')

        net = ffnet.FeedForwardNet()
        for i in range(len(layer_confs)):
            if layer_confs[i].type == 'Data' or layer_confs[i].type == 5:
                continue
            elif layer_confs[i].type == 'Input':
                self.input_sample_shape = layer_confs[i].input_param.shape[0].dim[1:]
            elif layer_confs[i].type == 'SoftmaxWithLoss' or layer_confs[i].type == 21:
                net.loss = loss.SoftmaxCrossEntropy()
            elif layer_confs[i].type == 'EuclideanLoss' or layer_confs[i].type == 7:
                net.loss = loss.SquareError()
            elif layer_confs[i].type == 'Accuracy' or layer_confs[i].type == 1:
                net.metric = metric.Accuracy()
            else:
                strConf = layer_confs[i].SerializeToString()
                conf = model_pb2.LayerConf()
                conf.ParseFromString(strConf)
                if caffe_solver:
                    layer.engine = self.convert_engine(
                        layer_confs[i], caffe_solver.solver_mode)
                else:
                    # if caffe_solver is None,
                    layer.engine = self.convert_engine(layer_confs[i], 0)
                lyr = layer.Layer(conf.name, conf)
                if len(net.layers) == 0:
                    print('input sample shape: ', self.input_sample_shape)
                    lyr.setup(self.input_sample_shape)
                    print(lyr.name, lyr.get_output_sample_shape())
                if layer_confs[i].type == 'InnerProduct' or layer_confs[i].type == 14:
                    net.add(layer.Flatten('flat' + str(flatten_id)))
                    flatten_id += 1
                net.add(lyr)

        return net
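A hedged driver for the method above; the class name CaffeConverter and its constructor arguments are assumptions inferred from the attributes the method reads (read_net_proto, caffe_solver_path, input_sample_shape), not a confirmed API:

# Hypothetical usage of the converter above; names are inferred from the snippet.
converter = CaffeConverter(net_proto='deploy.prototxt',
                           solver_proto='solver.prototxt',
                           input_sample_shape=(3, 224, 224))
net = converter.create_net()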
Example #4
def create_net(use_cpu=False):
    if use_cpu:
        layer.engine = 'singacpp'
    net = ffnet.FeedForwardNet(loss.SoftmaxCrossEntropy(), metric.Accuracy())
    ConvBnReLU(net, 'conv1_1', 64, (3, 32, 32))
    net.add(layer.Dropout('drop1', 0.3))
    ConvBnReLU(net, 'conv1_2', 64)
    net.add(layer.MaxPooling2D('pool1', 2, 2, border_mode='valid'))
    ConvBnReLU(net, 'conv2_1', 128)
    net.add(layer.Dropout('drop2_1', 0.4))
    ConvBnReLU(net, 'conv2_2', 128)
    net.add(layer.MaxPooling2D('pool2', 2, 2, border_mode='valid'))
    ConvBnReLU(net, 'conv3_1', 256)
    net.add(layer.Dropout('drop3_1', 0.4))
    ConvBnReLU(net, 'conv3_2', 256)
    net.add(layer.Dropout('drop3_2', 0.4))
    ConvBnReLU(net, 'conv3_3', 256)
    net.add(layer.MaxPooling2D('pool3', 2, 2, border_mode='valid'))
    ConvBnReLU(net, 'conv4_1', 512)
    net.add(layer.Dropout('drop4_1', 0.4))
    ConvBnReLU(net, 'conv4_2', 512)
    net.add(layer.Dropout('drop4_2', 0.4))
    ConvBnReLU(net, 'conv4_3', 512)
    net.add(layer.MaxPooling2D('pool4', 2, 2, border_mode='valid'))
    ConvBnReLU(net, 'conv5_1', 512)
    net.add(layer.Dropout('drop5_1', 0.4))
    ConvBnReLU(net, 'conv5_2', 512)
    net.add(layer.Dropout('drop5_2', 0.4))
    ConvBnReLU(net, 'conv5_3', 512)
    net.add(layer.MaxPooling2D('pool5', 2, 2, border_mode='valid'))
    net.add(layer.Flatten('flat'))
    net.add(layer.Dropout('drop_flat', 0.5))
    net.add(layer.Dense('ip1', 512))
    net.add(layer.BatchNormalization('batchnorm_ip1'))
    net.add(layer.Activation('relu_ip1'))
    net.add(layer.Dropout('drop_ip2', 0.5))
    net.add(layer.Dense('ip2', 10))
    print('Start initialization............')
    for (p, name) in zip(net.param_values(), net.param_names()):
        print(name, p.shape)
        if 'mean' in name or 'beta' in name:
            p.set_value(0.0)
        elif 'var' in name:
            p.set_value(1.0)
        elif 'gamma' in name:
            initializer.uniform(p, 0, 1)
        elif len(p.shape) > 1:
            if 'conv' in name:
                initializer.gaussian(p, 0, 3 * 3 * p.shape[0])
            else:
                p.gaussian(0, 0.02)
        else:
            p.set_value(0)
        print(name, p.l1())

    return net
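The initialization loop above recurs almost verbatim in the later examples; restated as a plain decision table (names and shapes only, no SINGA calls), the scheme is:

# Plain restatement of the initialization rules used by the loop above.
def init_rule(name, shape):
    if 'mean' in name or 'beta' in name:
        return 'constant 0'      # BatchNorm running mean / shift start at 0
    if 'var' in name:
        return 'constant 1'      # BatchNorm running variance starts at 1
    if 'gamma' in name:
        return 'uniform(0, 1)'   # BatchNorm scale
    if len(shape) > 1:
        return 'gaussian'        # weight matrices (conv uses a fan term)
    return 'constant 0'          # biases and other vectors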
Example #5
 def test_single_input_output(self):
     ffn = net.FeedForwardNet(loss.SoftmaxCrossEntropy())
     ffn.add(layer.Activation('relu1', input_sample_shape=(2,)))
     ffn.add(layer.Activation('relu2'))
     x = np.array([[-1, 1], [1, 1], [-1, -2]], dtype=np.float32)
     x = tensor.from_numpy(x)
     y = tensor.Tensor((3,))
     y.set_value(0)
     out, _ = ffn.evaluate(x, y)
     self.assertAlmostEqual(
         out * 3,
         -math.log(1.0 / (1 + math.exp(1))) - math.log(0.5) - math.log(0.5),
         5)
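The expected value can be checked by hand: the two ReLUs map the rows of x to [0, 1], [1, 1] and [0, 0], every row's label is 0, and the softmax cross-entropy of a row is -log p_0, so the batch total matches the assertion:

# NumPy check of the total loss the assertion expects.
import math
import numpy as np

h = np.maximum([[-1., 1.], [1., 1.], [-1., -2.]], 0)   # two ReLUs
p = np.exp(h) / np.exp(h).sum(axis=1, keepdims=True)   # softmax
total = -np.log(p[:, 0]).sum()                         # all labels are 0
expected = (-math.log(1.0 / (1 + math.exp(1)))
            - math.log(0.5) - math.log(0.5))
assert abs(total - expected) < 1e-9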
Example #6
def create_net(input_shape, use_cpu=False):
    if use_cpu:
        layer.engine = 'singacpp'
    net = ffnet.FeedForwardNet(loss.SoftmaxCrossEntropy(), metric.Accuracy())

    net.add(
        layer.Conv2D('conv1',
                     nb_kernels=32,
                     kernel=7,
                     stride=3,
                     pad=1,
                     input_sample_shape=input_shape))
    net.add(layer.Activation('relu1'))
    net.add(layer.MaxPooling2D('pool1', 2, 2, border_mode='valid'))

    net.add(layer.Conv2D('conv2', nb_kernels=64, kernel=5, stride=3))
    net.add(layer.Activation('relu2'))
    net.add(layer.MaxPooling2D('pool2', 2, 2, border_mode='valid'))

    net.add(layer.Conv2D('conv3', nb_kernels=128, kernel=3, stride=1, pad=2))
    net.add(layer.Activation('relu3'))
    net.add(layer.MaxPooling2D('pool3', 2, 2, border_mode='valid'))

    net.add(layer.Conv2D('conv4', nb_kernels=256, kernel=3, stride=1))
    net.add(layer.Activation('relu4'))
    net.add(layer.MaxPooling2D('pool4', 2, 2, border_mode='valid'))

    net.add(layer.Flatten('flat'))
    net.add(layer.Dense('ip5', 256))
    net.add(layer.Activation('relu5'))
    net.add(layer.Dense('ip6', 16))
    net.add(layer.Activation('relu6'))
    net.add(layer.Dense('ip7', 2))

    print('Parameter initialization............')
    for (p, name) in zip(net.param_values(), net.param_names()):
        print(name, p.shape)
        if 'mean' in name or 'beta' in name:
            p.set_value(0.0)
        elif 'var' in name:
            p.set_value(1.0)
        elif 'gamma' in name:
            initializer.uniform(p, 0, 1)
        elif len(p.shape) > 1:
            if 'conv' in name:
                initializer.gaussian(p, 0, p.size())
            else:
                p.gaussian(0, 0.02)
        else:
            p.set_value(0)
        print(name, p.l1())

    return net
Example #7
def densenet_base(depth, growth_rate=32, reduction=0.5):
    '''
        Rewritten according to the PyTorch models;
        DenseNet-161 is the special case (growth_rate = 48).
    '''
    if depth == 121:
        stages = [6, 12, 24, 16]
    elif depth == 169:
        stages = [6, 12, 32, 32]
    elif depth == 201:
        stages = [6, 12, 48, 32]
    elif depth == 161:
        stages = [6, 12, 36, 24]
    else:
        print('unknown depth: %d' % depth)
        sys.exit(-1)

    net = ffnet.FeedForwardNet()
    growth_rate = 48 if depth == 161 else 32
    n_channels = 2 * growth_rate

    net.add(
        Conv2D('input/conv',
               n_channels,
               7,
               2,
               pad=3,
               use_bias=conv_bias,
               input_sample_shape=(3, 224, 224)))
    net.add(BatchNormalization('input/bn'))
    net.add(Activation('input/relu'))
    net.add(MaxPooling2D('input/pool', 3, 2, pad=1))

    # Dense-Block 1 and transition (56x56)
    n_channels = add_block('block1', net, n_channels, stages[0], growth_rate)
    add_transition('trans1', net, int(math.floor(n_channels * reduction)))
    n_channels = math.floor(n_channels * reduction)

    # Dense-Block 2 and transition (28x28)
    n_channels = add_block('block2', net, n_channels, stages[1], growth_rate)
    add_transition('trans2', net, int(math.floor(n_channels * reduction)))
    n_channels = math.floor(n_channels * reduction)

    # Dense-Block 3 and transition (14x14)
    n_channels = add_block('block3', net, n_channels, stages[2], growth_rate)
    add_transition('trans3', net, int(math.floor(n_channels * reduction)))
    n_channels = math.floor(n_channels * reduction)

    # Dense-Block 4 and transition (7x7)
    n_channels = add_block('block4', net, n_channels, stages[3], growth_rate)
    add_transition('trans4', net, n_channels, True)

    return net
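The channel bookkeeping follows the DenseNet pattern: each block adds stages[i] * growth_rate channels, and each of the first three transitions multiplies the count by reduction. For depth=121 (growth_rate 32, reduction 0.5) this reproduces the familiar 1024 final channels:

# Channel counts as computed by densenet_base for depth=121.
n = 2 * 32                          # 64 channels after the input conv
for i, nlayers in enumerate([6, 12, 24, 16]):
    n += nlayers * 32               # each dense block adds nlayers * growth_rate
    if i < 3:
        n = int(n * 0.5)            # transitions 1-3 halve the count
assert n == 1024                    # channels entering the final transition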
Example #8
 def test_mult_inputs(self):
     ffn = net.FeedForwardNet(loss.SoftmaxCrossEntropy())
     s1 = ffn.add(layer.Activation('relu1', input_sample_shape=(2, )), [])
     s2 = ffn.add(layer.Activation('relu2', input_sample_shape=(2, )), [])
     ffn.add(layer.Merge('merge', input_sample_shape=(2, )), [s1, s2])
     x1 = tensor.Tensor((2, 2))
     x1.set_value(1.1)
     x2 = tensor.Tensor((2, 2))
     x2.set_value(0.9)
     out = ffn.forward(False, {'relu1': x1, 'relu2': x2})
     out = tensor.to_numpy(out)
     self.assertAlmostEqual(np.average(out), 2)
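The assertion pins down the Merge semantics: ReLU is the identity on these positive inputs, and an average of 2.0 only comes out if Merge sums its two inputs (1.1 + 0.9) rather than averaging them:

# The arithmetic behind the assertion above.
import numpy as np
x1 = np.full((2, 2), 1.1)
x2 = np.full((2, 2), 0.9)
assert abs(np.average(x1 + x2) - 2.0) < 1e-6   # sum, not mean, of the inputs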
Example #9
    def test_save_load(self):
        ffn = net.FeedForwardNet(loss.SoftmaxCrossEntropy())
        ffn.add(layer.Conv2D('conv', 4, 3, input_sample_shape=(3, 12, 12)))
        ffn.add(layer.Flatten('flat'))
        # ffn.add(layer.BatchNorm('bn'))
        ffn.add(layer.Dense('dense', num_output=4))
        for pname, pval in zip(ffn.param_names(), ffn.param_values()):
            pval.set_value(0.1)
        ffn.save('test_snapshot')
        ffn.save('test_pickle', use_pickle=True)

        ffn.load('test_snapshot')
        ffn.load('test_pickle', use_pickle=True)
Example #10
def create_net(depth, nb_classes, batchnorm=False, use_cpu=False):
    if use_cpu:
        layer.engine = 'singacpp'
    net = ffnet.FeedForwardNet()
    net = create_layers(net, cfg[depth], (3, 224, 224), batchnorm)
    net.add(Flatten('flat'))
    net.add(Dense('dense/classifier.0', 4096))
    net.add(Activation('act/classifier.1'))
    net.add(Dropout('dropout/classifier.2'))
    net.add(Dense('dense/classifier.3', 4096))
    net.add(Activation('act/classifier.4'))
    net.add(Dropout('dropout/classifier.5'))
    net.add(Dense('dense/classifier.6', nb_classes))
    return net
Example #11
def create_net(input_shape, use_cpu=False):
    if use_cpu:
        layer.engine = 'singacpp'
    net = ffnet.FeedForwardNet(loss.SoftmaxCrossEntropy(), metric.Accuracy())

    ConvBnReLUPool(net, 'conv1', 32, input_shape)
    ConvBnReLUPool(net, 'conv2', 64)
    ConvBnReLUPool(net, 'conv3', 128)
    ConvBnReLUPool(net, 'conv4', 128)
    ConvBnReLUPool(net, 'conv5', 256)
    ConvBnReLUPool(net, 'conv6', 256)
    ConvBnReLUPool(net, 'conv7', 512)
    ConvBnReLUPool(net, 'conv8', 512)

    net.add(layer.Flatten('flat'))

    net.add(layer.Dense('ip1', 256))
    net.add(layer.BatchNormalization('bn1'))
    net.add(layer.Activation('relu1'))
    net.add(layer.Dropout('dropout1', 0.2))

    net.add(layer.Dense('ip2', 16))
    net.add(layer.BatchNormalization('bn2'))
    net.add(layer.Activation('relu2'))
    net.add(layer.Dropout('dropout2', 0.2))

    net.add(layer.Dense('ip3', 2))

    print('Parameter initialization............')
    for (p, name) in zip(net.param_values(), net.param_names()):
        print(name, p.shape)
        if 'mean' in name or 'beta' in name:
            p.set_value(0.0)
        elif 'var' in name:
            p.set_value(1.0)
        elif 'gamma' in name:
            initializer.uniform(p, 0, 1)
        elif len(p.shape) > 1:
            if 'conv' in name:
                initializer.gaussian(p, 0, p.size())
            else:
                p.gaussian(0, 0.02)
        else:
            p.set_value(0)
        print(name, p.l1())

    return net
Example #12
def create_net(use_cpu=False):
    if use_cpu:
        layer.engine = 'singacpp'

    net = ffnet.FeedForwardNet(loss.SoftmaxCrossEntropy(), metric.Accuracy())
    net.add(
        layer.Conv2D("conv1", 16, 3, 1, pad=1, input_sample_shape=(3, 32, 32)))
    net.add(layer.BatchNormalization("bn1"))
    net.add(layer.Activation("relu1"))

    Block(net, "2a", 16, 1)
    Block(net, "2b", 16, 1)
    Block(net, "2c", 16, 1)

    Block(net, "3a", 32, 2)
    Block(net, "3b", 32, 1)
    Block(net, "3c", 32, 1)

    Block(net, "4a", 64, 2)
    Block(net, "4b", 64, 1)
    Block(net, "4c", 64, 1)

    net.add(layer.AvgPooling2D("pool4", 8, 8, border_mode='valid'))
    net.add(layer.Flatten('flat'))
    net.add(layer.Dense('ip5', 10))
    print('Start initialization............')
    for (p, name) in zip(net.param_values(), net.param_names()):
        # print name, p.shape
        if 'mean' in name or 'beta' in name:
            p.set_value(0.0)
        elif 'var' in name:
            p.set_value(1.0)
        elif 'gamma' in name:
            initializer.uniform(p, 0, 1)
        elif len(p.shape) > 1:
            if 'conv' in name:
                # initializer.gaussian(p, 0, math.sqrt(2.0/p.shape[1]))
                initializer.gaussian(p, 0, 9.0 * p.shape[0])
            else:
                initializer.uniform(p, p.shape[0], p.shape[1])
        else:
            p.set_value(0)
        # print name, p.l1()

    return net
Example #13
 def test_train_one_batch(self):
     ffn = net.FeedForwardNet(loss.SoftmaxCrossEntropy())
     ffn.add(layer.Conv2D('conv', 4, 3, input_sample_shape=(3, 12, 12)))
     ffn.add(layer.Flatten('flat'))
     ffn.add(layer.Dense('dense', num_output=4))
     for pname, pval in zip(ffn.param_names(), ffn.param_values()):
         pval.set_value(0.1)
     x = tensor.Tensor((4, 3, 12, 12))
     x.gaussian(0, 0.01)
     y = np.asarray([[1, 0, 0], [0, 0, 1], [0, 0, 1], [0, 1, 0]],
                    dtype=np.int32)
     y = tensor.from_numpy(y)
     o = ffn.forward(True, x)
     ffn.loss.forward(True, o, y)
     g = ffn.loss.backward()
     for pname, pvalue, pgrad in ffn.backward(g):
         self.assertEqual(len(pvalue), len(pgrad))
         for p, g in zip(pvalue, pgrad):
             self.assertEqual(p.size(), g.size())
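A minimal sketch of turning those (value, gradient) pairs into an update with plain SGD, using the same call sequence as the test; the in-place tensor arithmetic is an assumption about singa.tensor operator support, since the test itself only compares sizes:

# Hedged single SGD step built from the calls used in the test above;
# 'p -= lr * grad' assumes the tensor type supports scaled subtraction.
lr = 0.01
o = ffn.forward(True, x)
ffn.loss.forward(True, o, y)
g = ffn.loss.backward()
for pname, pvalue, pgrad in ffn.backward(g):
    for p, grad in zip(pvalue, pgrad):
        p -= lr * grad        # plain SGD, no momentum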
Example #14
def create_net(shape, weight_path='bvlc_googlenet.pickle'):
    net = ffnet.FeedForwardNet()
    net.add(Conv2D('conv1/7x7_s2', 64, 7, 2, pad=3, input_sample_shape=shape))
    c1 = net.add(Activation('conv1/relu_7x7'))
    pool1 = pool(net, c1, 'pool1/3x3_s2', 3, 2)
    norm1 = net.add(LRN('pool1/norm1', 5, 0.0001, 0.75))
    c3x3r = conv(net, norm1, 'conv2', 64, 1, suffix='3x3_reduce')
    c3x3 = conv(net, c3x3r, 'conv2', 192, 3, pad=1, suffix='3x3')
    norm2 = net.add(LRN('conv2/norm2', 5, 0.0001, 0.75))
    pool2 = pool(net, norm2, 'pool2/3x3_s2', 3, 2)

    i3a = inception(net, pool2, 'inception_3a', 64, 96, 128, 16, 32, 32)
    i3b = inception(net, i3a, 'inception_3b', 128, 128, 192, 32, 96, 64)
    pool3 = pool(net, i3b, 'pool3/3x3_s2', 3, 2)
    i4a = inception(net, pool3, 'inception_4a', 192, 96, 208, 16, 48, 64)
    i4b = inception(net, i4a, 'inception_4b', 160, 112, 224, 24, 64, 64)
    i4c = inception(net, i4b, 'inception_4c', 128, 128, 256, 24, 64, 64)
    i4d = inception(net, i4c, 'inception_4d', 112, 144, 288, 32, 64, 64)
    i4e = inception(net, i4d, 'inception_4e', 256, 160, 320, 32, 128, 128)
    pool4 = pool(net, i4e, 'pool4/3x3_s2', 3, 2)
    i5a = inception(net, pool4, 'inception_5a', 256, 160, 320, 32, 128, 128)
    i5b = inception(net, i5a, 'inception_5b', 384, 192, 384, 48, 128, 128)
    pool5 = net.add(AvgPooling2D('pool5/7x7_s1', 7, 1, pad=0))
    drop5 = net.add(Dropout('drop', 0.4))
    flat = net.add(Flatten('flat'))
    dense = net.add(Dense('loss3/classifier', 1000))
    # prob=net.add(Softmax('softmax'))

    net.load(weight_path, use_pickle=True)
    print('total num of params %d' % (len(net.param_names())))
    # SINGA and Caffe have different layout for the weight matrix of the dense
    # layer
    for key, val in zip(net.param_names(), net.param_values()):
        # print key
        if key == 'loss3/classifier_weight' or key == 'loss3/classifier/weight':
            tmp = tensor.to_numpy(val)
            tmp = tmp.reshape(tmp.shape[::-1])
            val.copy_from_numpy(np.transpose(tmp))
    return net
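The reshape-then-transpose step reinterprets the flat Caffe weight buffer in the other layout; a toy NumPy example makes the effect concrete:

# Toy demo of the dense-weight layout fix above.
import numpy as np
w = np.arange(6, dtype=np.float32).reshape(2, 3)   # stand-in Caffe weight
fixed = np.transpose(w.reshape(w.shape[::-1]))     # same shape, permuted data
assert fixed.shape == w.shape
assert not np.array_equal(fixed, w)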
Example #15
def create_wide_resnet(depth=50):
    '''Similar to the original resnet, except that a <= b <= c for the bottleneck block'''
    net = ffnet.FeedForwardNet()
    net.add(
        Conv2D('input-conv',
               64,
               7,
               2,
               pad=3,
               use_bias=False,
               input_sample_shape=(3, 224, 224)))
    net.add(BatchNormalization('input-bn'))
    net.add(Activation('input_relu'))
    net.add(MaxPooling2D('input_pool', 3, 2, pad=1))

    stage(0, net, 3, 64, 128, 256, 1, bottleneck)
    stage(1, net, 4, 256, 256, 512, 2, bottleneck)
    stage(2, net, 6, 512, 512, 1024, 2, bottleneck)
    stage(3, net, 3, 1024, 1024, 2048, 2, bottleneck)

    net.add(AvgPooling2D('avg_pool', 7, 1, pad=0))
    net.add(Flatten('flat'))
    net.add(Dense('dense', 1000))
    return net
Example #16
 def build_net(self):
     if self.use_cpu:
         layer.engine = 'singacpp'
     else:
         layer.engine = 'cudnn'
     self.net = ffnet.FeedForwardNet(loss.SoftmaxCrossEntropy(),
                                     metric.Accuracy())
     self.net.add(
         Reshape('reshape1', (self.vocab_size, ),
                 input_sample_shape=(self.maxlen, self.vocab_size)))
     self.net.add(layer.Dense('embed',
                              self.embed_size))  # output: (embed_size, )
     self.net.add(layer.Dropout('dropout'))
     self.net.add(Reshape('reshape2', (1, self.maxlen, self.embed_size)))
     self.net.add(
         layer.Conv2D('conv',
                      self.filters, (self.kernel_size, self.embed_size),
                      border_mode='valid'))  # output: (filter, embed_size)
     if not self.use_cpu:
         self.net.add(layer.BatchNormalization('batchNorm'))
     self.net.add(layer.Activation('activ'))  # output: (filter, embed_size)
     self.net.add(layer.MaxPooling2D('max', stride=self.pool_size))
     self.net.add(layer.Flatten('flatten'))
     self.net.add(layer.Dense('dense', 2))
Example #17
def create_net(use_cpu=False):
    if use_cpu:
        layer.engine = 'singacpp'

    net = ffnet.FeedForwardNet(loss.SoftmaxCrossEntropy(), metric.Accuracy())
    W0_specs = {'init': 'gaussian', 'mean': 0, 'std': 0.0001}
    W1_specs = {'init': 'gaussian', 'mean': 0, 'std': 0.01}
    W2_specs = {'init': 'gaussian', 'mean': 0, 'std': 0.01, 'decay_mult': 250}

    b_specs = {'init': 'constant', 'value': 0, 'lr_mult': 2, 'decay_mult': 0}
    net.add(
        layer.Conv2D('conv1',
                     32,
                     5,
                     1,
                     W_specs=W0_specs.copy(),
                     b_specs=b_specs.copy(),
                     pad=2,
                     input_sample_shape=(3, 32, 32)))
    net.add(layer.MaxPooling2D('pool1', 3, 2, pad=1))
    net.add(layer.Activation('relu1'))
    net.add(layer.LRN(name='lrn1', size=3, alpha=5e-5))
    net.add(
        layer.Conv2D('conv2',
                     32,
                     5,
                     1,
                     W_specs=W1_specs.copy(),
                     b_specs=b_specs.copy(),
                     pad=2))
    net.add(layer.Activation('relu2'))
    net.add(layer.AvgPooling2D('pool2', 3, 2, pad=1))
    net.add(layer.LRN('lrn2', size=3, alpha=5e-5))
    net.add(
        layer.Conv2D('conv3',
                     64,
                     5,
                     1,
                     W_specs=W1_specs.copy(),
                     b_specs=b_specs.copy(),
                     pad=2))
    net.add(layer.Activation('relu3'))
    net.add(layer.AvgPooling2D('pool3', 3, 2, pad=1))
    net.add(layer.Flatten('flat'))
    net.add(
        layer.Dense('dense',
                    10,
                    W_specs=W2_specs.copy(),
                    b_specs=b_specs.copy()))
    for (p, specs) in zip(net.param_values(), net.param_specs()):
        filler = specs.filler
        if filler.type == 'gaussian':
            p.gaussian(filler.mean, filler.std)
        else:
            p.set_value(0)
        print(specs.name, filler.type, p.l1())

    return net
Example #18
    def __init__(self,
                 dev,
                 rows=28,
                 cols=28,
                 channels=1,
                 noise_size=100,
                 hidden_size=128,
                 batch=128,
                 interval=1000,
                 learning_rate=0.001,
                 epochs=1000000,
                 d_steps=3,
                 g_steps=1,
                 dataset_filepath='mnist.pkl.gz',
                 file_dir='lsgan_images/'):
        self.dev = dev
        self.rows = rows
        self.cols = cols
        self.channels = channels
        self.feature_size = self.rows * self.cols * self.channels
        self.noise_size = noise_size
        self.hidden_size = hidden_size
        self.batch = batch
        self.batch_size = self.batch // 2
        self.interval = interval
        self.learning_rate = learning_rate
        self.epochs = epochs
        self.d_steps = d_steps
        self.g_steps = g_steps
        self.dataset_filepath = dataset_filepath
        self.file_dir = file_dir

        self.g_w0_specs = {
            'init': 'xavier',
        }
        self.g_b0_specs = {
            'init': 'constant',
            'value': 0,
        }
        self.g_w1_specs = {
            'init': 'xavier',
        }
        self.g_b1_specs = {
            'init': 'constant',
            'value': 0,
        }
        self.gen_net = ffnet.FeedForwardNet(loss.SquaredError())
        self.gen_net_fc_0 = layer.Dense(name='g_fc_0',
                                        num_output=self.hidden_size,
                                        use_bias=True,
                                        W_specs=self.g_w0_specs,
                                        b_specs=self.g_b0_specs,
                                        input_sample_shape=(self.noise_size, ))
        self.gen_net_relu_0 = layer.Activation(
            name='g_relu_0',
            mode='relu',
            input_sample_shape=(self.hidden_size, ))
        self.gen_net_fc_1 = layer.Dense(
            name='g_fc_1',
            num_output=self.feature_size,
            use_bias=True,
            W_specs=self.g_w1_specs,
            b_specs=self.g_b1_specs,
            input_sample_shape=(self.hidden_size, ))
        self.gen_net_sigmoid_1 = layer.Activation(
            name='g_sigmoid_1',
            mode='sigmoid',
            input_sample_shape=(self.feature_size, ))
        self.gen_net.add(self.gen_net_fc_0)
        self.gen_net.add(self.gen_net_relu_0)
        self.gen_net.add(self.gen_net_fc_1)
        self.gen_net.add(self.gen_net_sigmoid_1)
        for (p, specs) in zip(self.gen_net.param_values(),
                              self.gen_net.param_specs()):
            filler = specs.filler
            if filler.type == 'gaussian':
                p.gaussian(filler.mean, filler.std)
            elif filler.type == 'xavier':
                initializer.xavier(p)
            else:
                p.set_value(0)
            print(specs.name, filler.type, p.l1())
        self.gen_net.to_device(self.dev)

        self.d_w0_specs = {
            'init': 'xavier',
        }
        self.d_b0_specs = {
            'init': 'constant',
            'value': 0,
        }
        self.d_w1_specs = {
            'init': 'xavier',
        }
        self.d_b1_specs = {
            'init': 'constant',
            'value': 0,
        }
        self.dis_net = ffnet.FeedForwardNet(loss.SquaredError())
        self.dis_net_fc_0 = layer.Dense(
            name='d_fc_0',
            num_output=self.hidden_size,
            use_bias=True,
            W_specs=self.d_w0_specs,
            b_specs=self.d_b0_specs,
            input_sample_shape=(self.feature_size, ))
        self.dis_net_relu_0 = layer.Activation(
            name='d_relu_0',
            mode='relu',
            input_sample_shape=(self.hidden_size, ))
        self.dis_net_fc_1 = layer.Dense(
            name='d_fc_1',
            num_output=1,
            use_bias=True,
            W_specs=self.d_w1_specs,
            b_specs=self.d_b1_specs,
            input_sample_shape=(self.hidden_size, ))
        self.dis_net.add(self.dis_net_fc_0)
        self.dis_net.add(self.dis_net_relu_0)
        self.dis_net.add(self.dis_net_fc_1)
        for (p, specs) in zip(self.dis_net.param_values(),
                              self.dis_net.param_specs()):
            filler = specs.filler
            if filler.type == 'gaussian':
                p.gaussian(filler.mean, filler.std)
            elif filler.type == 'xavier':
                initializer.xavier(p)
            else:
                p.set_value(0)
            print(specs.name, filler.type, p.l1())
        self.dis_net.to_device(self.dev)

        self.combined_net = ffnet.FeedForwardNet(loss.SquaredError())
        for l in self.gen_net.layers:
            self.combined_net.add(l)
        for l in self.dis_net.layers:
            self.combined_net.add(l)
        self.combined_net.to_device(self.dev)
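A hedged sketch of one generator forward pass through the combined net, reusing only the tensor and net calls that appear in this and earlier examples; this illustrates the wiring rather than the author's training loop, and `gan` is a hypothetical instance of the class above:

# Illustrative generator step; 'gan' is a hypothetical instance of the class.
z = tensor.Tensor((gan.batch_size, gan.noise_size))
z.gaussian(0, 1)                                   # sample noise
target = tensor.Tensor((gan.batch_size, 1))
target.set_value(1.0)                              # LSGAN real-label target
out = gan.combined_net.forward(True, z)
l = gan.combined_net.loss.forward(True, out, target)
grads = gan.combined_net.loss.backward()           # feed to combined_net.backward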
Example #19
def inception_v4_base(sample_shape,
                      final_endpoint='InceptionV4/Mixed_7d',
                      aux_endpoint='InceptionV4/Mixed_6e'):
    """Creates the Inception V4 network up to the given final endpoint.

    Endpoint name list: 'InceptionV4/' +
        ['Conv2d_1a_3x3', 'Conv2d_2a_3x3', 'Conv2d_2b_3x3',
        'Mixed_3a', 'Mixed_4a', 'Mixed_5a', 'Mixed_5b', 'Mixed_5c', 'Mixed_5d',
        'Mixed_5e', 'Mixed_6a', 'Mixed_6b', 'Mixed_6c', 'Mixed_6d', 'Mixed_6e',
        'Mixed_6f', 'Mixed_6g', 'Mixed_6h', 'Mixed_7a', 'Mixed_7b', 'Mixed_7c',
        'Mixed_7d']

    Args:
        sample_shape: input image sample shape, 3d tuple
        final_endpoint: specifies the endpoint to construct the network up to.
        aux_endpoint: for aux loss.

    Returns:
        the neural net
        the set of end_points from the inception model.
    """
    name = 'InceptionV4'
    end_points = {}
    net = ffnet.FeedForwardNet()

    def final_aux_check(block_name):
        if block_name == final_endpoint:
            return True
        if block_name == aux_endpoint:
            aux = aux_endpoint + '-aux'
            end_points[aux] = net.add(Split(aux, 2))
        return False

    # 299 x 299 x 3
    blk = name + '/Conv2d_1a_3x3'
    net.add(
        Conv2D(blk,
               32,
               3,
               2,
               border_mode='VALID',
               use_bias=False,
               input_sample_shape=sample_shape))
    net.add(BatchNormalization('%s/BatchNorm' % blk))
    end_points[blk] = net.add(Activation('%s/relu' % blk))
    if final_aux_check(blk):
        return net, end_points

    # 149 x 149 x 32
    blk = name + '/Conv2d_2a_3x3'
    end_points[blk] = conv2d(net, blk, 32, 3, border_mode='VALID')
    if final_aux_check(blk):
        return net, end_points

    # 147 x 147 x 32
    blk = name + '/Conv2d_2b_3x3'
    end_points[blk] = conv2d(net, blk, 64, 3)
    if final_aux_check(blk):
        return net, end_points

    # 147 x 147 x 64
    blk = name + '/Mixed_3a'
    s = net.add(Split('%s/Split' % blk, 2))
    br0 = net.add(
        MaxPooling2D('%s/Branch_0/MaxPool_0a_3x3' % blk,
                     3,
                     2,
                     border_mode='VALID'), s)
    br1 = conv2d(net,
                 '%s/Branch_1/Conv2d_0a_3x3' % blk,
                 96,
                 3,
                 2,
                 border_mode='VALID',
                 src=s)
    end_points[blk] = net.add(Concat('%s/Concat' % blk, 1), [br0, br1])
    if final_aux_check(blk):
        return net, end_points

    # 73 x 73 x 160
    blk = name + '/Mixed_4a'
    s = net.add(Split('%s/Split' % blk, 2))
    br0 = conv2d(net, '%s/Branch_0/Conv2d_0a_1x1' % blk, 64, 1, src=s)
    br0 = conv2d(net,
                 '%s/Branch_0/Conv2d_1a_3x3' % blk,
                 96,
                 3,
                 border_mode='VALID')
    br1 = conv2d(net, '%s/Branch_1/Conv2d_0a_1x1' % blk, 64, 1, src=s)
    br1 = conv2d(net, '%s/Branch_1/Conv2d_0b_1x7' % blk, 64, (1, 7))
    br1 = conv2d(net, '%s/Branch_1/Conv2d_0c_7x1' % blk, 64, (7, 1))
    br1 = conv2d(net,
                 '%s/Branch_1/Conv2d_1a_3x3' % blk,
                 96,
                 3,
                 border_mode='VALID')
    end_points[blk] = net.add(Concat('%s/Concat' % blk, 1), [br0, br1])
    if final_aux_check(blk):
        return net, end_points

    # 71 x 71 x 192
    blk = name + '/Mixed_5a'
    s = net.add(Split('%s/Split' % blk, 2))
    br0 = conv2d(net,
                 '%s/Branch_0/Conv2d_1a_3x3' % blk,
                 192,
                 3,
                 2,
                 border_mode='VALID',
                 src=s)
    br1 = net.add(
        MaxPooling2D('%s/Branch_1/MaxPool_1a_3x3' % blk,
                     3,
                     2,
                     border_mode='VALID'), s)
    end_points[blk] = net.add(Concat('%s/Concat' % blk, 1), [br0, br1])
    if final_aux_check(blk):
        return net, end_points

    # 35 x 35 x 384
    # 4 x Inception-A blocks
    for idx in range(4):
        blk = name + '/Mixed_5' + chr(ord('b') + idx)
        end_points[blk] = block_inception_a(blk, net)
        if final_aux_check(blk):
            return net, end_points

    # 35 x 35 x 384
    # Reduction-A block
    blk = name + '/Mixed_6a'
    end_points[blk] = block_reduction_a(blk, net)
    if final_aux_check(blk):
        return net, end_points

    # 17 x 17 x 1024
    # 7 x Inception-B blocks
    for idx in range(7):
        blk = name + '/Mixed_6' + chr(ord('b') + idx)
        end_points[blk] = block_inception_b(blk, net)
        if final_aux_check(blk):
            return net, end_points

    # 17 x 17 x 1024
    # Reduction-B block
    blk = name + '/Mixed_7a'
    end_points[blk] = block_reduction_b(blk, net)
    if final_aux_check(blk):
        return net, end_points

    # 8 x 8 x 1536
    # 3 x Inception-C blocks
    for idx in range(3):
        blk = name + '/Mixed_7' + chr(ord('b') + idx)
        end_points[blk] = block_inception_c(blk, net)
        if final_aux_check(blk):
            return net, end_points

    assert final_endpoint == blk, \
        'final_endpoint = %s is not in the net' % final_endpoint
    return net, end_points
Example #20
def create_net(input_shape, use_cpu=False):
    if use_cpu:
        layer.engine = 'singacpp'
    net = ffnet.FeedForwardNet(loss.SoftmaxCrossEntropy(), metric.Accuracy())

    ConvBnReLU(net, 'conv1_1', 64, input_shape)
    #net.add(layer.Dropout('drop1', 0.3))
    net.add(layer.MaxPooling2D('pool0', 2, 2, border_mode='valid'))
    ConvBnReLU(net, 'conv1_2', 128)
    net.add(layer.MaxPooling2D('pool1', 2, 2, border_mode='valid'))
    ConvBnReLU(net, 'conv2_1', 128)
    net.add(layer.Dropout('drop2_1', 0.4))
    ConvBnReLU(net, 'conv2_2', 128)
    net.add(layer.MaxPooling2D('pool2', 2, 2, border_mode='valid'))
    ConvBnReLU(net, 'conv3_1', 256)
    net.add(layer.Dropout('drop3_1', 0.4))
    ConvBnReLU(net, 'conv3_2', 256)
    net.add(layer.Dropout('drop3_2', 0.4))
    ConvBnReLU(net, 'conv3_3', 256)
    net.add(layer.MaxPooling2D('pool3', 2, 2, border_mode='valid'))
    ConvBnReLU(net, 'conv4_1', 256)
    net.add(layer.Dropout('drop4_1', 0.4))
    ConvBnReLU(net, 'conv4_2', 256)
    net.add(layer.Dropout('drop4_2', 0.4))
    ConvBnReLU(net, 'conv4_3', 256)
    net.add(layer.MaxPooling2D('pool4', 2, 2, border_mode='valid'))
    ConvBnReLU(net, 'conv5_1', 512)
    net.add(layer.Dropout('drop5_1', 0.4))
    ConvBnReLU(net, 'conv5_2', 512)
    net.add(layer.Dropout('drop5_2', 0.4))
    ConvBnReLU(net, 'conv5_3', 512)
    net.add(layer.MaxPooling2D('pool5', 2, 2, border_mode='valid'))
    #ConvBnReLU(net, 'conv6_1', 512)
    #net.add(layer.Dropout('drop6_1', 0.4))
    #ConvBnReLU(net, 'conv6_2', 512)
    #net.add(layer.Dropout('drop6_2', 0.4))
    #ConvBnReLU(net, 'conv6_3', 512)
    #net.add(layer.MaxPooling2D('pool6', 2, 2, border_mode='valid'))
    #ConvBnReLU(net, 'conv7_1', 512)
    #net.add(layer.Dropout('drop7_1', 0.4))
    #ConvBnReLU(net, 'conv7_2', 512)
    #net.add(layer.Dropout('drop7_2', 0.4))
    #ConvBnReLU(net, 'conv7_3', 512)
    #net.add(layer.MaxPooling2D('pool7', 2, 2, border_mode='valid'))

    net.add(layer.Flatten('flat'))

    net.add(layer.Dense('ip1', 256))
    net.add(layer.BatchNormalization('bn1'))
    net.add(layer.Activation('relu1'))
    net.add(layer.Dropout('dropout1', 0.2))

    net.add(layer.Dense('ip2', 16))
    net.add(layer.BatchNormalization('bn2'))
    net.add(layer.Activation('relu2'))
    net.add(layer.Dropout('dropout2', 0.2))

    net.add(layer.Dense('ip3', 2))

    print('Parameter initialization............')
    for (p, name) in zip(net.param_values(), net.param_names()):
        print(name, p.shape)
        if 'mean' in name or 'beta' in name:
            p.set_value(0.0)
        elif 'var' in name:
            p.set_value(1.0)
        elif 'gamma' in name:
            initializer.uniform(p, 0, 1)
        elif len(p.shape) > 1:
            if 'conv' in name:
                initializer.gaussian(p, 0, p.size())
            else:
                p.gaussian(0, 0.02)
        else:
            p.set_value(0)
        print(name, p.l1())

    return net
Example #21
def inception_v3_base(name,
                      sample_shape,
                      final_endpoint,
                      aux_endpoint,
                      depth_multiplier=1,
                      min_depth=16):
    """Creates the Inception V3 network up to the given final endpoint.

    Args:
        sample_shape: input image sample shape, 3d tuple
        final_endpoint: specifies the endpoint to construct the network up to.
        aux_endpoint: for aux loss.

    Returns:
        net: the neural net constructed up to final_endpoint.
        end_points: the set of end_points from the inception model.

    Raises:
        ValueError: if final_endpoint is not set to one of the predefined values
    """
    V3 = 'InceptionV3'
    end_points = {}
    net = ffnet.FeedForwardNet()

    def final_aux_check(block_name):
        if block_name == final_endpoint:
            return True
        if block_name == aux_endpoint:
            aux = aux_endpoint + '-aux'
            end_points[aux] = net.add(Split(aux, 2))
        return False

    def depth(d):
        return max(int(d * depth_multiplier), min_depth)

    blk = V3 + '/Conv2d_1a_3x3'
    # 299 x 299 x 3
    net.add(
        Conv2D(blk,
               depth(32),
               3,
               2,
               border_mode='VALID',
               use_bias=False,
               input_sample_shape=sample_shape))
    net.add(BatchNormalization(blk + '/BatchNorm'))
    end_points[blk] = net.add(Activation(blk + '/relu'))
    if final_aux_check(blk):
        return net, end_points

    # 149 x 149 x 32
    conv2d(net, '%s/Conv2d_2a_3x3' % V3, depth(32), 3, border_mode='VALID')
    # 147 x 147 x 32
    conv2d(net, '%s/Conv2d_2b_3x3' % V3, depth(64), 3)
    # 147 x 147 x 64
    net.add(MaxPooling2D('%s/MaxPool_3a_3x3' % V3, 3, 2, border_mode='VALID'))
    # 73 x 73 x 64
    conv2d(net, '%s/Conv2d_3b_1x1' % V3, depth(80), 1, border_mode='VALID')
    # 73 x 73 x 80.
    conv2d(net, '%s/Conv2d_4a_3x3' % V3, depth(192), 3, border_mode='VALID')
    # 71 x 71 x 192.
    net.add(MaxPooling2D('%s/MaxPool_5a_3x3' % V3, 3, 2, border_mode='VALID'))

    # 35 x 35 x 192.
    blk = V3 + '/Mixed_5b'
    s = net.add(Split('%s/Split' % blk, 4))
    br0 = conv2d(net, '%s/Branch_0/Conv2d_0a_1x1' % blk, depth(64), 1, src=s)
    br1 = conv2d(net, '%s/Branch_1/Conv2d_0a_1x1' % blk, depth(48), 1, src=s)
    br1 = conv2d(net, '%s/Branch_1/Conv2d_0b_5x5' % blk, depth(64), 5)
    br2 = conv2d(net, '%s/Branch_2/Conv2d_0a_1x1' % blk, depth(64), 1, src=s)
    br2 = conv2d(net, '%s/Branch_2/Conv2d_0b_3x3' % blk, depth(96), 3)
    br2 = conv2d(net, '%s/Branch_2/Conv2d_0c_3x3' % blk, depth(96), 3)
    net.add(AvgPooling2D('%s/Branch_3/AvgPool_0a_3x3' % blk, 3, 1), s)
    br3 = conv2d(net, '%s/Branch_3/Conv2d_0b_1x1' % blk, depth(32), 1)
    end_points[blk] = net.add(Concat('%s/Concat' % blk, 1),
                              [br0, br1, br2, br3])
    if final_aux_check(blk):
        return net, end_points

    # mixed_1: 35 x 35 x 288.
    blk = V3 + '/Mixed_5c'
    s = net.add(Split('%s/Split' % blk, 4))
    br0 = conv2d(net, '%s/Branch_0/Conv2d_0a_1x1' % blk, depth(64), 1, src=s)
    br1 = conv2d(net, '%s/Branch_1/Conv2d_0b_1x1' % blk, depth(48), 1, src=s)
    br1 = conv2d(net, '%s/Branch_1/Conv_1_0c_5x5' % blk, depth(64), 5)
    br2 = conv2d(net, '%s/Branch_2/Conv2d_0a_1x1' % blk, depth(64), 1, src=s)
    br2 = conv2d(net, '%s/Branch_2/Conv2d_0b_3x3' % blk, depth(96), 3)
    br2 = conv2d(net, '%s/Branch_2/Conv2d_0c_3x3' % blk, depth(96), 3)
    br3 = net.add(AvgPooling2D('%s/Branch_3/AvgPool_0a_3x3' % blk, 3, 1),
                  src=s)
    br3 = conv2d(net, '%s/Branch_3/Conv2d_0b_1x1' % blk, depth(64), 1)
    end_points[blk] = net.add(Concat('%s/Concat' % blk, 1),
                              [br0, br1, br2, br3])
    if final_aux_check(blk):
        return net, end_points

    # mixed_2: 35 x 35 x 288.
    blk = V3 + '/Mixed_5d'
    s = net.add(Split('%s/Split' % blk, 4))
    br0 = conv2d(net, '%s/Branch_0/Conv2d_0a_1x1' % blk, depth(64), 1, src=s)
    br1 = conv2d(net, '%s/Branch_1/Conv2d_0a_1x1' % blk, depth(48), 1, src=s)
    br1 = conv2d(net, '%s/Branch_1/Conv2d_0b_5x5' % blk, depth(64), 5)
    br2 = conv2d(net, '%s/Branch_2/Conv2d_0a_1x1' % blk, depth(64), 1, src=s)
    br2 = conv2d(net, '%s/Branch_2/Conv2d_0b_3x3' % blk, depth(96), 3)
    br2 = conv2d(net, '%s/Branch_2/Conv2d_0c_3x3' % blk, depth(96), 3)
    br3 = net.add(AvgPooling2D('%s/Branch_3/AvgPool_0a_3x3' % blk, 3, 1), s)
    br3 = conv2d(net, '%s/Branch_3/Conv2d_0b_1x1' % blk, depth(64), 1)
    end_points[blk] = net.add(Concat('%s/Concat' % blk, 1),
                              [br0, br1, br2, br3])
    if final_aux_check(blk):
        return net, end_points

    # mixed_3: 17 x 17 x 768.
    blk = V3 + '/Mixed_6a'
    s = net.add(Split('%s/Split' % blk, 3))
    br0 = conv2d(net,
                 '%s/Branch_0/Conv2d_1a_1x1' % blk,
                 depth(384),
                 3,
                 2,
                 border_mode='VALID',
                 src=s)
    br1 = conv2d(net, '%s/Branch_1/Conv2d_0a_1x1' % blk, depth(64), 1, src=s)
    br1 = conv2d(net, '%s/Branch_1/Conv2d_0b_3x3' % blk, depth(96), 3)
    br1 = conv2d(net,
                 '%s/Branch_1/Conv2d_1a_1x1' % blk,
                 depth(96),
                 3,
                 2,
                 border_mode='VALID')
    br2 = net.add(
        MaxPooling2D('%s/Branch_2/MaxPool_1a_3x3' % blk,
                     3,
                     2,
                     border_mode='VALID'), s)
    end_points[blk] = net.add(Concat('%s/Concat' % blk, 1), [br0, br1, br2])
    if final_aux_check(blk):
        return net, end_points

    # mixed4: 17 x 17 x 768.
    blk = V3 + '/Mixed_6b'
    s = net.add(Split('%s/Split' % blk, 4))
    br0 = conv2d(net, '%s/Branch_0/Conv2d_0a_1x1' % blk, depth(192), 1, src=s)
    br1 = conv2d(net, '%s/Branch_1/Conv2d_0a_1x1' % blk, depth(128), 1, src=s)
    br1 = conv2d(net, '%s/Branch_1/Conv2d_0b_1x7' % blk, depth(128), [1, 7])
    br1 = conv2d(net, '%s/Branch_1/Conv2d_0c_7x1' % blk, depth(192), [7, 1])
    br2 = conv2d(net,
                 '%s/Branch_2/Conv2d_0a_1x1' % blk,
                 depth(128), [1, 1],
                 src=s)
    br2 = conv2d(net, '%s/Branch_2/Conv2d_0b_7x1' % blk, depth(128), [7, 1])
    br2 = conv2d(net, '%s/Branch_2/Conv2d_0c_1x7' % blk, depth(128), [1, 7])
    br2 = conv2d(net, '%s/Branch_2/Conv2d_0d_7x1' % blk, depth(128), [7, 1])
    br2 = conv2d(net, '%s/Branch_2/Conv2d_0e_1x7' % blk, depth(192), [1, 7])
    br3 = net.add(AvgPooling2D('%s/Branch_3/AvgPool_0a_3x3' % blk, 3, 1), s)
    br3 = conv2d(net, '%s/Branch_3/Conv2d_0b_1x1' % blk, depth(192), [1, 1])
    end_points[blk] = net.add(Concat('%s/Concat' % blk, 1),
                              [br0, br1, br2, br3])
    if final_aux_check(blk):
        return net, end_points

    # mixed_5: 17 x 17 x 768.
    blk = V3 + '/Mixed_6c'
    s = net.add(Split('%s/Split' % blk, 4))
    br0 = conv2d(net,
                 '%s/Branch_0/Conv2d_0a_1x1' % blk,
                 depth(192), [1, 1],
                 src=s)
    br1 = conv2d(net,
                 '%s/Branch_1/Conv2d_0a_1x1' % blk,
                 depth(160), [1, 1],
                 src=s)
    br1 = conv2d(net, '%s/Branch_1/Conv2d_0b_1x7' % blk, depth(160), [1, 7])
    br1 = conv2d(net, '%s/Branch_1/Conv2d_0c_7x1' % blk, depth(192), [7, 1])
    br2 = conv2d(net,
                 '%s/Branch_2/Conv2d_0a_1x1' % blk,
                 depth(160), [1, 1],
                 src=s)
    br2 = conv2d(net, '%s/Branch_2/Conv2d_0b_7x1' % blk, depth(160), [7, 1])
    br2 = conv2d(net, '%s/Branch_2/Conv2d_0c_1x7' % blk, depth(160), [1, 7])
    br2 = conv2d(net, '%s/Branch_2/Conv2d_0d_7x1' % blk, depth(160), [7, 1])
    br2 = conv2d(net, '%s/Branch_2/Conv2d_0e_1x7' % blk, depth(192), [1, 7])
    br3 = net.add(AvgPooling2D('%s/Branch_3/AvgPool_0a_3x3' % blk, 3, 1), s)
    br3 = conv2d(net, '%s/Branch_3/Conv2d_0b_1x1' % blk, depth(192), [1, 1])
    end_points[blk] = net.add(Concat('%s/Concat' % blk, 1),
                              [br0, br1, br2, br3])
    if final_aux_check(blk):
        return net, end_points

    # mixed_6: 17 x 17 x 768.
    blk = V3 + '/Mixed_6d'
    s = net.add(Split('%s/Split' % blk, 4))
    br0 = conv2d(net,
                 '%s/Branch_0/Conv2d_0a_1x1' % blk,
                 depth(192), [1, 1],
                 src=s)
    br1 = conv2d(net,
                 '%s/Branch_1/Conv2d_0a_1x1' % blk,
                 depth(160), [1, 1],
                 src=s)
    br1 = conv2d(net, '%s/Branch_1/Conv2d_0b_1x7' % blk, depth(160), [1, 7])
    br1 = conv2d(net, '%s/Branch_1/Conv2d_0c_7x1' % blk, depth(192), [7, 1])
    br2 = conv2d(net,
                 '%s/Branch_2/Conv2d_0a_1x1' % blk,
                 depth(160), [1, 1],
                 src=s)
    br2 = conv2d(net, '%s/Branch_2/Conv2d_0b_7x1' % blk, depth(160), [7, 1])
    br2 = conv2d(net, '%s/Branch_2/Conv2d_0c_1x7' % blk, depth(160), [1, 7])
    br2 = conv2d(net, '%s/Branch_2/Conv2d_0d_7x1' % blk, depth(160), [7, 1])
    br2 = conv2d(net, '%s/Branch_2/Conv2d_0e_1x7' % blk, depth(192), [1, 7])
    br3 = net.add(AvgPooling2D('%s/Branch_3/AvgPool_0a_3x3' % blk, 3, 1), s)
    br3 = conv2d(net, '%s/Branch_3/Conv2d_0b_1x1' % blk, depth(192), [1, 1])
    end_points[blk] = net.add(Concat('%s/Concat' % blk, 1),
                              [br0, br1, br2, br3])
    if final_aux_check(blk):
        return net, end_points

    blk = V3 + '/Mixed_6e'
    s = net.add(Split('%s/Split' % blk, 4))
    br0 = conv2d(net,
                 '%s/Branch_0/Conv2d_0a_1x1' % blk,
                 depth(192), [1, 1],
                 src=s)
    br1 = conv2d(net,
                 '%s/Branch_1/Conv2d_0a_1x1' % blk,
                 depth(192), [1, 1],
                 src=s)
    br1 = conv2d(net, '%s/Branch_1/Conv2d_0b_1x7' % blk, depth(192), [1, 7])
    br1 = conv2d(net, '%s/Branch_1/Conv2d_0c_7x1' % blk, depth(192), [7, 1])
    br2 = conv2d(net,
                 '%s/Branch_2/Conv2d_0a_1x1' % blk,
                 depth(192), [1, 1],
                 src=s)
    br2 = conv2d(net, '%s/Branch_2/Conv2d_0b_7x1' % blk, depth(192), [7, 1])
    br2 = conv2d(net, '%s/Branch_2/Conv2d_0c_1x7' % blk, depth(192), [1, 7])
    br2 = conv2d(net, '%s/Branch_2/Conv2d_0d_7x1' % blk, depth(192), [7, 1])
    br2 = conv2d(net, '%s/Branch_2/Conv2d_0e_1x7' % blk, depth(192), [1, 7])
    br3 = net.add(AvgPooling2D('%s/Branch_3/AvgPool_0a_3x3' % blk, 3, 1), s)
    br3 = conv2d(net, '%s/Branch_3/Conv2d_0b_1x1' % blk, depth(192), [1, 1])
    end_points[blk] = net.add(Concat('%s/Concat' % blk, 1),
                              [br0, br1, br2, br3])
    if final_aux_check(blk):
        return net, end_points

    # mixed_8: 8 x 8 x 1280.
    blk = V3 + '/Mixed_7a'
    s = net.add(Split('%s/Split' % blk, 3))
    br0 = conv2d(net,
                 '%s/Branch_0/Conv2d_0a_1x1' % blk,
                 depth(192), [1, 1],
                 src=s)
    br0 = conv2d(net,
                 '%s/Branch_0/Conv2d_1a_3x3' % blk,
                 depth(320), [3, 3],
                 2,
                 border_mode='VALID')
    br1 = conv2d(net,
                 '%s/Branch_1/Conv2d_0a_1x1' % blk,
                 depth(192), [1, 1],
                 src=s)
    br1 = conv2d(net, '%s/Branch_1/Conv2d_0b_1x7' % blk, depth(192), [1, 7])
    br1 = conv2d(net, '%s/Branch_1/Conv2d_0c_7x1' % blk, depth(192), [7, 1])
    br1 = conv2d(net,
                 '%s/Branch_1/Conv2d_1a_3x3' % blk,
                 depth(192), [3, 3],
                 2,
                 border_mode='VALID')
    br2 = net.add(
        MaxPooling2D('%s/Branch_2/MaxPool_1a_3x3' % blk,
                     3,
                     2,
                     border_mode='VALID'), s)
    end_points[blk] = net.add(Concat('%s/Concat' % blk, 1), [br0, br1, br2])
    if final_aux_check(blk):
        return net, end_points

    # mixed_9: 8 x 8 x 2048.
    blk = V3 + '/Mixed_7b'
    s = net.add(Split('%s/Split' % blk, 4))
    br0 = conv2d(net, '%s/Branch_0/Conv2d_0a_1x1' % blk, depth(320), 1, src=s)
    br1 = conv2d(net, '%s/Branch_1/Conv2d_0a_1x1' % blk, depth(384), 1, src=s)
    s1 = net.add(Split('%s/Branch_1/Split1' % blk, 2))
    br11 = conv2d(net,
                  '%s/Branch_1/Conv2d_0b_1x3' % blk,
                  depth(384), [1, 3],
                  src=s1)
    br12 = conv2d(net,
                  '%s/Branch_1/Conv2d_0b_3x1' % blk,
                  depth(384), [3, 1],
                  src=s1)
    br1 = net.add(Concat('%s/Branch_1/Concat1' % blk, 1), [br11, br12])
    br2 = conv2d(net, '%s/Branch_2/Conv2d_0a_1x1' % blk, depth(448), 1, src=s)
    br2 = conv2d(net, '%s/Branch_2/Conv2d_0b_3x3' % blk, depth(384), 3)
    s2 = net.add(Split('%s/Branch_2/Split2' % blk, 2))
    br21 = conv2d(net,
                  '%s/Branch_2/Conv2d_0c_1x3' % blk,
                  depth(384), [1, 3],
                  src=s2)
    br22 = conv2d(net,
                  '%s/Branch_2/Conv2d_0d_3x1' % blk,
                  depth(384), [3, 1],
                  src=s2)
    br2 = net.add(Concat('%s/Branch_2/Concat2' % blk, 1), [br21, br22])
    br3 = net.add(AvgPooling2D('%s/Branch_3/AvgPool_0a_3x3' % blk, 3, 1),
                  src=s)
    br3 = conv2d(net, '%s/Branch_3/Conv2d_0b_1x1' % blk, depth(192), [1, 1])
    end_points[blk] = net.add(Concat('%s/Concat' % blk, 1),
                              [br0, br1, br2, br3])
    if final_aux_check(blk):
        return net, end_points

    # mixed_10: 8 x 8 x 2048.
    blk = V3 + '/Mixed_7c'
    s = net.add(Split('%s/Split' % blk, 4))
    br0 = conv2d(net, '%s/Branch_0/Conv2d_0a_1x1' % blk, depth(320), 1, src=s)
    br1 = conv2d(net, '%s/Branch_1/Conv2d_0a_1x1' % blk, depth(384), 1, src=s)
    s1 = net.add(Split('%s/Branch_1/Split1' % blk, 2))
    br11 = conv2d(net,
                  '%s/Branch_1/Conv2d_0b_1x3' % blk,
                  depth(384), [1, 3],
                  src=s1)
    br12 = conv2d(net,
                  '%s/Branch_1/Conv2d_0c_3x1' % blk,
                  depth(384), [3, 1],
                  src=s1)
    br1 = net.add(Concat('%s/Branch_1/Concat1' % blk, 1), [br11, br12])
    br2 = conv2d(net,
                 '%s/Branch_2/Conv2d_0a_1x1' % blk,
                 depth(448), [1, 1],
                 src=s)
    br2 = conv2d(net, '%s/Branch_2/Conv2d_0b_3x3' % blk, depth(384), [3, 3])
    s2 = net.add(Split('%s/Branch_2/Split2' % blk, 2))
    br21 = conv2d(net,
                  '%s/Branch_2/Conv2d_0c_1x3' % blk,
                  depth(384), [1, 3],
                  src=s2)
    br22 = conv2d(net,
                  '%s/Branch_2/Conv2d_0d_3x1' % blk,
                  depth(384), [3, 1],
                  src=s2)
    br2 = net.add(Concat('%s/Branch_2/Concat2' % blk, 1), [br21, br22])
    br3 = net.add(AvgPooling2D('%s/Branch_3/AvgPool_0a_3x3' % blk, 3, 1),
                  src=s)
    br3 = conv2d(net, '%s/Branch_3/Conv2d_0b_1x1' % blk, depth(192), [1, 1])
    end_points[blk] = net.add(Concat('%s/Concat' % blk, 1),
                              [br0, br1, br2, br3])
    assert final_endpoint == blk, \
        'final_endpoint = %s is not in the net' % final_endpoint
    return net, end_points
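A hedged usage sketch for the builder above: the endpoint name comes from the block constants in the function, while the classifier head stacked on top is an illustration in the style of the earlier ResNet examples:

# Build the V3 trunk up to Mixed_7c and attach an illustrative head.
net, end_points = inception_v3_base('InceptionV3', (3, 299, 299),
                                    'InceptionV3/Mixed_7c', None)
net.add(AvgPooling2D('pool', 8, 1, pad=0))   # 8 x 8 feature map at Mixed_7c
net.add(Flatten('flat'))
net.add(Dense('classifier', 1000))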