def mobilenet(x, is_train=True, reuse=False):
    with tf.variable_scope("mobilenet", reuse=reuse):
        n = InputLayer(x)
        n = conv_block(n, 32, strides=(2, 2), is_train=is_train, name="conv")
        n = depthwise_conv_block(n, 64, is_train=is_train, name="depth1")

        n = depthwise_conv_block(n, 128, strides=(2, 2), is_train=is_train, name="depth2")
        n = depthwise_conv_block(n, 128, is_train=is_train, name="depth3")

        n = depthwise_conv_block(n, 256, strides=(2, 2), is_train=is_train, name="depth4")
        n = depthwise_conv_block(n, 256, is_train=is_train, name="depth5")

        n = depthwise_conv_block(n, 512, strides=(2, 2), is_train=is_train, name="depth6")
        n = depthwise_conv_block(n, 512, is_train=is_train, name="depth7")
        n = depthwise_conv_block(n, 512, is_train=is_train, name="depth8")
        n = depthwise_conv_block(n, 512, is_train=is_train, name="depth9")
        n = depthwise_conv_block(n, 512, is_train=is_train, name="depth10")
        n = depthwise_conv_block(n, 512, is_train=is_train, name="depth11")

        n = depthwise_conv_block(n, 1024, strides=(2, 2), is_train=is_train, name="depth12")
        n = depthwise_conv_block(n, 1024, is_train=is_train, name="depth13")

        n = GlobalMeanPool2d(n)
        # n = DropoutLayer(n, 1-1e-3, True, is_train, name='drop')
        # n = DenseLayer(n, 1000, act=tf.identity, name='output')   # equal
        n = ReshapeLayer(n, [-1, 1, 1, 1024])
        n = Conv2d(n, 1000, (1, 1), (1, 1), name='out')
        n = FlattenLayer(n)
    return n
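A minimal usage sketch (not part of the original example; `conv_block` and `depthwise_conv_block` are assumed to be defined elsewhere in the same module):

# graph-mode (TL 1.x) usage: build the network once, then run inference
x = tf.placeholder(tf.float32, [None, 224, 224, 3], name='x')
net = mobilenet(x, is_train=False)
probs = tf.nn.softmax(net.outputs)  # [batch, 1000] class probabilities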
Example #2
def get_E_x2za(x_shape=(None, flags.img_size_h, flags.img_size_w, flags.c_dim),
               name="Encoder_x2za"):
    # ref: Multimodal Unsupervised Image-to-Image Translation
    # input: (batch_size_train, 256, 256, 3)
    # output: vector (batch_size_train, za_dim)
    w_init = tf.random_normal_initializer(stddev=0.02)
    ni = Input(x_shape)
    n = Conv2d(64, (7, 7), (1, 1), act=tf.nn.relu, W_init=w_init)(ni)
    n = Conv2d(128, (4, 4), (2, 2), act=tf.nn.relu, W_init=w_init)(n)
    n = Conv2d(256, (4, 4), (2, 2), act=tf.nn.relu, W_init=w_init)(n)
    n = Conv2d(256, (4, 4), (2, 2), act=tf.nn.relu, W_init=w_init)(n)
    n = GlobalMeanPool2d()(n)
    n = Flatten()(n)
    n = Dense(flags.za_dim)(n)
    M = Model(inputs=ni, outputs=n, name=name)
    return M
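A minimal usage sketch (an assumption, not in the original source; `flags` is the module-level config used above):

E = get_E_x2za()
E.eval()  # switch the TensorLayer model to inference mode
fake_x = tf.zeros([4, flags.img_size_h, flags.img_size_w, flags.c_dim])
za = E(fake_x)  # style vector, shape [4, flags.za_dim]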
Example #3
    def mobilenetv1(self, x, end_with='out', is_train=False, reuse=None):
        with tf.variable_scope("mobilenetv1", reuse=reuse):
            n = InputLayer(x)
            n = self.conv_block(n, 32, strides=(2, 2), is_train=is_train, name="conv")
            if end_with in n.outputs.name: return n
            n = self.depthwise_conv_block(n, 64, is_train=is_train, name="depth1")
            if end_with in n.outputs.name: return n

            n = self.depthwise_conv_block(n, 128, strides=(2, 2), is_train=is_train, name="depth2")
            if end_with in n.outputs.name: return n
            n = self.depthwise_conv_block(n, 128, is_train=is_train, name="depth3")
            if end_with in n.outputs.name: return n

            n = self.depthwise_conv_block(n, 256, strides=(2, 2), is_train=is_train, name="depth4")
            if end_with in n.outputs.name: return n
            n = self.depthwise_conv_block(n, 256, is_train=is_train, name="depth5")
            if end_with in n.outputs.name: return n

            n = self.depthwise_conv_block(n, 512, strides=(2, 2), is_train=is_train, name="depth6")
            if end_with in n.outputs.name: return n
            n = self.depthwise_conv_block(n, 512, is_train=is_train, name="depth7")
            if end_with in n.outputs.name: return n
            n = self.depthwise_conv_block(n, 512, is_train=is_train, name="depth8")
            if end_with in n.outputs.name: return n
            n = self.depthwise_conv_block(n, 512, is_train=is_train, name="depth9")
            if end_with in n.outputs.name: return n
            n = self.depthwise_conv_block(n, 512, is_train=is_train, name="depth10")
            if end_with in n.outputs.name: return n
            n = self.depthwise_conv_block(n, 512, is_train=is_train, name="depth11")
            if end_with in n.outputs.name: return n

            n = self.depthwise_conv_block(n, 1024, strides=(2, 2), is_train=is_train, name="depth12")
            if end_with in n.outputs.name: return n
            n = self.depthwise_conv_block(n, 1024, is_train=is_train, name="depth13")
            if end_with in n.outputs.name: return n

            n = GlobalMeanPool2d(n, name='globalmeanpool')
            if end_with in n.outputs.name: return n
            # n = DropoutLayer(n, 1-1e-3, True, is_train, name='drop')
            # n = DenseLayer(n, 1000, act=tf.identity, name='output')   # equal
            n = ReshapeLayer(n, [-1, 1, 1, 1024], name='reshape')
            if end_with in n.outputs.name: return n
            n = Conv2d(n, 1000, (1, 1), (1, 1), name='out')
            n = FlattenLayer(n, name='flatten')
            if end_with == 'out': return n

            raise Exception("end_with : conv, depth1, depth2 ... depth13, globalmeanpool, reshape, out")
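The method matches `end_with` against the current output tensor's name, so the network can be truncated at any named block. A hedged sketch, assuming `model` is an instance of the class that defines this method:

x = tf.placeholder(tf.float32, [None, 224, 224, 3])
feats = model.mobilenetv1(x, end_with='depth13', is_train=False)
# feats.outputs: [batch, 7, 7, 1024] feature map instead of the 1000-way logits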
Example #4
def get_Ea(x_shape=(None, flags.img_size_h, flags.img_size_w, flags.c_dim),
           name=None):  # input: (1, 256, 256, 3)
    # ref: DRIT source code (Pytorch Implementation)
    w_init = tf.random_normal_initializer(stddev=0.02)
    ndf = 64
    ni = Input(x_shape)
    nn = Conv2d(ndf, (7, 7), (1, 1), padding='VALID', W_init=w_init,
                act=None)(ni)

    ## Basic Blocks * 3
    for i in range(1, 4):
        ## Basic Block
        # conv part
        n = Lambda(lambda x: tf.nn.leaky_relu(x, 0.2))(nn)  # leaky relu (0.2)
        n = Conv2d(ndf * i, (3, 3), (1, 1),
                   padding='VALID',
                   W_init=w_init,
                   act=lambda a: tf.nn.leaky_relu(a, 0.2))(n)  # conv3x3; `lrelu` was undefined in this excerpt
        n = Conv2d(ndf * (i + 1), (3, 3), (1, 1),
                   padding='VALID',
                   W_init=w_init,
                   act=None)(n)  # conv3x3 in convMeanpool
        n = MeanPool2d((2, 2), (2, 2))(n)  # meanPool2d in convMeanpool
        # shortcut part
        ns = MeanPool2d((2, 2), (2, 2))(nn)
        ns = Conv2d(ndf * (i + 1), (3, 3), (1, 1),
                    padding='VALID',
                    W_init=w_init)(ns)
        nn = Elementwise(tf.add)([n, ns])

    n = GlobalMeanPool2d()(nn)
    n_mu = Dense(n_units=flags.za_dim, W_init=w_init, name="mean_linear")(n)
    n_log_var = Dense(n_units=flags.za_dim, W_init=w_init,
                      name="var_linear")(n)

    # Sampling using reparametrization trick
    def sample(input):
        n_mu = input[0]
        n_log_var = input[1]
        epsilon = tf.random.truncated_normal(tf.shape(n_mu))  # tf.shape handles the dynamic batch dim
        stddev = tf.exp(0.5 * n_log_var)  # n_log_var is log(sigma^2), so sigma = exp(log_var / 2)
        out = n_mu + stddev * epsilon
        return out

    no = Lambda(sample)([n_mu, n_log_var])
    M = Model(inputs=ni, outputs=[no, n_mu, n_log_var], name=name)
    return M
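The sampling Lambda implements the reparameterization trick, za = mu + sigma * eps with eps drawn from a truncated standard normal. A sketch of the KL term such an encoder is typically trained with, assuming n_log_var is log(sigma^2) as the "var_linear" layer name suggests (`img_batch` is a hypothetical input batch):

Ea = get_Ea(name="Ea")
Ea.train()
za, mu, log_var = Ea(img_batch)
# KL( N(mu, sigma^2) || N(0, 1) ), summed over za_dim, averaged over the batch
kl = 0.5 * tf.reduce_mean(tf.reduce_sum(
    tf.exp(log_var) + tf.square(mu) - 1.0 - log_var, axis=1))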
Example #5
    def ShuffleNetV1(self, inputlayer, name):
        inputlayer = InputLayer(inputlayer, name='input')  # input: 32*32*2
        x = Conv2d(inputlayer, 24, (3, 3), strides=(2, 2), padding='SAME', act=tf.nn.relu, name=name+'_Con2d')
        x = MaxPool2d(x, filter_size=(3, 3), strides=(2, 2), padding='SAME', name=name+'_MaxPool')
        x = self.stage(x, n_filter=384, filter_size=(3, 3), groups=8, repeat=4, stage=2, name=name+'_stage1')
        x = self.stage(x, n_filter=768, filter_size=(3, 3), groups=8, repeat=8, stage=3, name=name+'_stage2')
        x = self.stage(x, n_filter=1536, filter_size=(3, 3), groups=8, repeat=4, stage=4, name=name+'_stage3')
        print("stage3", x.outputs.get_shape())
        print(x.count_params())
        x = GlobalMeanPool2d(x, name=name+'_GlobalMeanPool')  # fixed: this layer is a mean pool, but was named '_GlobalMaxPool'
        print("GAP", x.outputs.get_shape())
        print(x.count_params())
        x = DenseLayer(x, name=name+'_Dense')  # n_units not given, so DenseLayer's default is used
        print("DENSE", x.outputs.get_shape())
        print(x.count_params())

        return x
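A hedged usage sketch (the `stage` helper and the class wrapping this method are not shown; the input-layer comment suggests 32x32 inputs with 2 channels, and `model` is a hypothetical instance):

x = tf.placeholder(tf.float32, [None, 32, 32, 2])
net = model.ShuffleNetV1(x, name='shufflenet')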
Example #6
def MobileNetV1(pretrained=False, end_with='out', name=None):
    """Pre-trained MobileNetV1 model (static mode). Input shape [?, 224, 224, 3], value range [0, 1].

    Parameters
    ----------
    pretrained : boolean
        Whether to load pretrained weights. Default False.
    end_with : str
        The end point of the model [conv, depth1, depth2 ... depth13, globalmeanpool, out]. Default ``out`` i.e. the whole model.
    name : None or str
        Name for this model.

    Examples
    ---------
    Classify ImageNet classes, see `tutorial_models_mobilenetv1.py <https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_models_mobilenetv1.py>`__

    >>> # get the whole model with pretrained weights
    >>> mobilenetv1 = tl.models.MobileNetV1(pretrained=True)
    >>> # use for inferencing
    >>> output = mobilenetv1(img1, is_train=False)
    >>> prob = tf.nn.softmax(output)[0].numpy()

    Extract features and train a classifier with 100 classes

    >>> # get model without the last layer
    >>> cnn = tl.models.MobileNetV1(pretrained=True, end_with='reshape').as_layer()
    >>> # add one more layer and build new model
    >>> ni = Input([None, 224, 224, 3], name="inputs")
    >>> nn = cnn(ni)
    >>> nn = Conv2d(100, (1, 1), (1, 1), name='out')(nn)
    >>> nn = Flatten(name='flatten')(nn)
    >>> model = tl.models.Model(inputs=ni, outputs=nn)
    >>> # train your own classifier (only update the last layer)
    >>> train_params = model.get_layer('out').trainable_weights

    Returns
    -------
        static MobileNetV1.
    """
    ni = Input([None, 224, 224, 3], name="input")

    for i in range(len(layer_names)):
        if i == 0:
            n = conv_block(ni,
                           n_filters[i],
                           strides=(2, 2),
                           name=layer_names[i])
        elif layer_names[i] in ['depth2', 'depth4', 'depth6', 'depth12']:
            n = depthwise_conv_block(n,
                                     n_filters[i],
                                     strides=(2, 2),
                                     name=layer_names[i])
        elif layer_names[i] == 'globalmeanpool':
            n = GlobalMeanPool2d(name='globalmeanpool')(n)
        elif layer_names[i] == 'reshape':
            n = Reshape([-1, 1, 1, 1024], name='reshape')(n)
        elif layer_names[i] == 'out':
            n = Conv2d(1000, (1, 1), (1, 1), name='out')(n)
            n = Flatten(name='flatten')(n)
        else:
            n = depthwise_conv_block(n, n_filters[i], name=layer_names[i])

        if layer_names[i] == end_with:
            break

    network = Model(inputs=ni, outputs=n, name=name)

    if pretrained:
        restore_params(network)

    return network
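The module-level tables the loop iterates over are not shown in this excerpt. Reconstructed from the architecture in Example #1 (an assumption, not copied from the original module), they would look like:

layer_names = ['conv'] + ['depth%d' % i for i in range(1, 14)] + ['globalmeanpool', 'reshape', 'out']
n_filters = [32, 64, 128, 128, 256, 256, 512, 512, 512, 512, 512, 512, 1024, 1024]  # indexed only for conv/depth layers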
Example #7
def ResNet50(pretrained=False, end_with='fc1000', n_classes=1000, name=None):
    """Pre-trained MobileNetV1 model (static mode). Input shape [?, 224, 224, 3].
    To use pretrained model, input should be in BGR format and subtracted from ImageNet mean [103.939, 116.779, 123.68].

    Parameters
    ----------
    pretrained : boolean
        Whether to load pretrained weights. Default False.
    end_with : str
        The end point of the model [2a, 2b, 2c ... 5c, avg_pool, fc1000].
        Default ``fc1000`` i.e. the whole model.
    n_classes : int
        Number of classes in final prediction.
    name : None or str
        Name for this model.

    Examples
    ---------
    Classify ImageNet classes, see `tutorial_models_resnet50.py`

    >>> # get the whole model with pretrained weights
    >>> resnet = tl.models.ResNet50(pretrained=True)
    >>> # use for inferencing
    >>> output = resnet(img1, is_train=False)
    >>> prob = tf.nn.softmax(output)[0].numpy()

    Extract the features before the fc layer

    >>> resnet = tl.models.ResNet50(pretrained=True, end_with='5c')
    >>> output = resnet(img1, is_train=False)

    Returns
    -------
        ResNet50 model.

    """
    ni = Input([None, 224, 224, 3], name="input")
    n = Conv2d(64, (7, 7),
               strides=(2, 2),
               padding='SAME',
               W_init=tf.initializers.he_normal(),
               name='conv1')(ni)
    n = BatchNorm(name='bn_conv1', act='relu')(n)
    n = MaxPool2d((3, 3), strides=(2, 2), name='max_pool1')(n)

    for i, block_name in enumerate(block_names):
        if len(block_name) == 2:
            stage = int(block_name[0])
            block = block_name[1]
            if block == 'a':
                strides = (1, 1) if stage == 2 else (2, 2)
                n = conv_block(n,
                               3,
                               block_filters[stage - 2],
                               stage=stage,
                               block=block,
                               strides=strides)
            else:
                n = identity_block(n,
                                   3,
                                   block_filters[stage - 2],
                                   stage=stage,
                                   block=block)
        elif block_name == 'avg_pool':
            n = GlobalMeanPool2d(name='avg_pool')(n)
        elif block_name == 'fc1000':
            n = Dense(n_classes, name='fc1000')(n)

        if block_name == end_with:
            break

    network = Model(inputs=ni, outputs=n, name=name)

    if pretrained:
        restore_params(network)

    return network
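A preprocessing sketch following the docstring above: BGR channel order with the ImageNet mean subtracted (`img_rgb` is a hypothetical float tensor in [0, 255]):

def preprocess_for_resnet50(img_rgb):
    img_bgr = img_rgb[..., ::-1]                    # RGB -> BGR
    mean = tf.constant([103.939, 116.779, 123.68])  # per-channel BGR mean
    return img_bgr - mean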
Example #8
    def squeezenetv1(cls, x, end_with='output', is_train=False, reuse=None):
        with tf.compat.v1.variable_scope("squeezenetv1", reuse=reuse):
            with tf.compat.v1.variable_scope("input"):
                n = InputLayer(x)
                # n = Conv2d(n, 96, (7,7),(2,2),tf.nn.relu,'SAME',name='conv1')
                n = Conv2d(n,
                           64, (3, 3), (2, 2),
                           tf.nn.relu,
                           'SAME',
                           name='conv1')
                n = MaxPool2d(n, (3, 3), (2, 2), 'VALID', name='max')
            if end_with in n.outputs.name:
                return n

            with tf.compat.v1.variable_scope("fire2"):
                n = Conv2d(n,
                           16, (1, 1), (1, 1),
                           tf.nn.relu,
                           'SAME',
                           name='squeeze1x1')
                n1 = Conv2d(n,
                            64, (1, 1), (1, 1),
                            tf.nn.relu,
                            'SAME',
                            name='expand1x1')
                n2 = Conv2d(n,
                            64, (3, 3), (1, 1),
                            tf.nn.relu,
                            'SAME',
                            name='expand3x3')
                n = ConcatLayer([n1, n2], -1, name='concat')
            if end_with in n.outputs.name:
                return n

            with tf.compat.v1.variable_scope("fire3"):
                n = Conv2d(n,
                           16, (1, 1), (1, 1),
                           tf.nn.relu,
                           'SAME',
                           name='squeeze1x1')
                n1 = Conv2d(n,
                            64, (1, 1), (1, 1),
                            tf.nn.relu,
                            'SAME',
                            name='expand1x1')
                n2 = Conv2d(n,
                            64, (3, 3), (1, 1),
                            tf.nn.relu,
                            'SAME',
                            name='expand3x3')
                n = ConcatLayer([n1, n2], -1, name='concat')
                n = MaxPool2d(n, (3, 3), (2, 2), 'VALID', name='max')
            if end_with in n.outputs.name:
                return n

            with tf.compat.v1.variable_scope("fire4"):
                n = Conv2d(n,
                           32, (1, 1), (1, 1),
                           tf.nn.relu,
                           'SAME',
                           name='squeeze1x1')
                n1 = Conv2d(n,
                            128, (1, 1), (1, 1),
                            tf.nn.relu,
                            'SAME',
                            name='expand1x1')
                n2 = Conv2d(n,
                            128, (3, 3), (1, 1),
                            tf.nn.relu,
                            'SAME',
                            name='expand3x3')
                n = ConcatLayer([n1, n2], -1, name='concat')
            if end_with in n.outputs.name:
                return n

            with tf.compat.v1.variable_scope("fire5"):
                n = Conv2d(n,
                           32, (1, 1), (1, 1),
                           tf.nn.relu,
                           'SAME',
                           name='squeeze1x1')
                n1 = Conv2d(n,
                            128, (1, 1), (1, 1),
                            tf.nn.relu,
                            'SAME',
                            name='expand1x1')
                n2 = Conv2d(n,
                            128, (3, 3), (1, 1),
                            tf.nn.relu,
                            'SAME',
                            name='expand3x3')
                n = ConcatLayer([n1, n2], -1, name='concat')
                n = MaxPool2d(n, (3, 3), (2, 2), 'VALID', name='max')
            if end_with in n.outputs.name:
                return n

            with tf.compat.v1.variable_scope("fire6"):
                n = Conv2d(n,
                           48, (1, 1), (1, 1),
                           tf.nn.relu,
                           'SAME',
                           name='squeeze1x1')
                n1 = Conv2d(n,
                            192, (1, 1), (1, 1),
                            tf.nn.relu,
                            'SAME',
                            name='expand1x1')
                n2 = Conv2d(n,
                            192, (3, 3), (1, 1),
                            tf.nn.relu,
                            'SAME',
                            name='expand3x3')
                n = ConcatLayer([n1, n2], -1, name='concat')
            if end_with in n.outputs.name:
                return n

            with tf.compat.v1.variable_scope("fire7"):
                n = Conv2d(n,
                           48, (1, 1), (1, 1),
                           tf.nn.relu,
                           'SAME',
                           name='squeeze1x1')
                n1 = Conv2d(n,
                            192, (1, 1), (1, 1),
                            tf.nn.relu,
                            'SAME',
                            name='expand1x1')
                n2 = Conv2d(n,
                            192, (3, 3), (1, 1),
                            tf.nn.relu,
                            'SAME',
                            name='expand3x3')
                n = ConcatLayer([n1, n2], -1, name='concat')
            if end_with in n.outputs.name:
                return n

            with tf.compat.v1.variable_scope("fire8"):
                n = Conv2d(n,
                           64, (1, 1), (1, 1),
                           tf.nn.relu,
                           'SAME',
                           name='squeeze1x1')
                n1 = Conv2d(n,
                            256, (1, 1), (1, 1),
                            tf.nn.relu,
                            'SAME',
                            name='expand1x1')
                n2 = Conv2d(n,
                            256, (3, 3), (1, 1),
                            tf.nn.relu,
                            'SAME',
                            name='expand3x3')
                n = ConcatLayer([n1, n2], -1, name='concat')
            if end_with in n.outputs.name:
                return n

            with tf.compat.v1.variable_scope("fire9"):
                n = Conv2d(n,
                           64, (1, 1), (1, 1),
                           tf.nn.relu,
                           'SAME',
                           name='squeeze1x1')
                n1 = Conv2d(n,
                            256, (1, 1), (1, 1),
                            tf.nn.relu,
                            'SAME',
                            name='expand1x1')
                n2 = Conv2d(n,
                            256, (3, 3), (1, 1),
                            tf.nn.relu,
                            'SAME',
                            name='expand3x3')
                n = ConcatLayer([n1, n2], -1, name='concat')
            if end_with in n.outputs.name:
                return n

            with tf.compat.v1.variable_scope("output"):
                n = DropoutLayer(n,
                                 keep=0.5,
                                 is_fix=True,
                                 is_train=is_train,
                                 name='drop1')
                n = Conv2d(n,
                           1000, (1, 1), (1, 1),
                           padding='VALID',
                           name='conv10')  # 13, 13, 1000
                n = GlobalMeanPool2d(n)
            if end_with in n.outputs.name:
                return n

            raise Exception("end_with : input, fire2, fire3 ... fire9, output")
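A hedged usage sketch: the method takes `cls` as its first argument, so it is presumably decorated as a classmethod on a model class, here called `SqueezeNet` as an assumption:

x = tf.compat.v1.placeholder(tf.float32, [None, 224, 224, 3])
net = SqueezeNet.squeezenetv1(x, end_with='output', is_train=False)
probs = tf.nn.softmax(net.outputs)  # [batch, 1000]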
Example #9
def SqueezeNetV1(pretrained=False, end_with='out', name=None):
    """Pre-trained SqueezeNetV1 model (static mode). Input shape [?, 224, 224, 3], value range [0, 1].

    Parameters
    ------------
    pretrained : boolean
        Whether to load pretrained weights. Default False.
    end_with : str
        The end point of the model [conv1, maxpool1, fire2, fire3, fire4, ..., out]. Default ``out`` i.e. the whole model.
    name : None or str
        Name for this model.

    Examples
    ---------
    Classify ImageNet classes, see `tutorial_models_squeezenetv1.py <https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_models_squeezenetv1.py>`__

    >>> # get the whole model
    >>> squeezenet = tl.models.SqueezeNetV1(pretrained=True)
    >>> # use for inferencing
    >>> output = squeezenet(img1, is_train=False)
    >>> prob = tf.nn.softmax(output)[0].numpy()

    Extract features and train a classifier with 100 classes

    >>> # get model without the last layer
    >>> cnn = tl.models.SqueezeNetV1(pretrained=True, end_with='drop1').as_layer()
    >>> # add one more layer and build new model
    >>> ni = Input([None, 224, 224, 3], name="inputs")
    >>> nn = cnn(ni)
    >>> nn = Conv2d(100, (1, 1), (1, 1), padding='VALID', name='conv10')(nn)
    >>> nn = GlobalMeanPool2d(name='globalmeanpool')(nn)
    >>> model = tl.models.Model(inputs=ni, outputs=nn)
    >>> # train your own classifier (only update the last layer)
    >>> train_params = model.get_layer('conv10').trainable_weights

    Returns
    -------
        static SqueezeNetV1.

    """
    ni = Input([None, 224, 224, 3], name="input")
    n = Lambda(lambda x: x * 255, name='scale')(ni)

    for i in range(len(layer_names)):
        if layer_names[i] == 'conv1':
            n = Conv2d(64, (3, 3), (2, 2), tf.nn.relu, 'SAME', name='conv1')(n)
        elif layer_names[i] == 'maxpool1':
            n = MaxPool2d((3, 3), (2, 2), 'VALID', name='maxpool1')(n)
        elif layer_names[i] == 'drop1':
            n = Dropout(keep=0.5, name='drop1')(n)
        elif layer_names[i] == 'out':
            n = Conv2d(1000, (1, 1), (1, 1), padding='VALID', name='conv10')(n)  # 13, 13, 1000
            n = GlobalMeanPool2d(name='globalmeanpool')(n)
        elif layer_names[i] in ['fire3', 'fire5']:
            n = fire_block(n, n_filters[i - 2], max_pool=True, name=layer_names[i])
        else:
            n = fire_block(n, n_filters[i - 2], max_pool=False, name=layer_names[i])

        if layer_names[i] == end_with:
            break

    network = Model(inputs=ni, outputs=n, name=name)

    if pretrained:
        restore_params(network)

    return network
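The `fire_block` helper is referenced above but not shown. A sketch reconstructed from the fire modules in Example #8 (an assumption; the real helper lives elsewhere in the module, where each expand branch is 4x the squeeze width):

def fire_block(n, n_filter, max_pool=False, name='fire'):
    n = Conv2d(n_filter, (1, 1), (1, 1), tf.nn.relu, 'SAME', name=name + '.squeeze1x1')(n)
    n1 = Conv2d(n_filter * 4, (1, 1), (1, 1), tf.nn.relu, 'SAME', name=name + '.expand1x1')(n)
    n2 = Conv2d(n_filter * 4, (3, 3), (1, 1), tf.nn.relu, 'SAME', name=name + '.expand3x3')(n)
    n = Concat(-1, name=name + '.concat')([n1, n2])
    if max_pool:
        n = MaxPool2d((3, 3), (2, 2), 'VALID', name=name + '.max')(n)
    return n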
Example #10
def squeezenet(x, is_train=True, reuse=False):
    # model from: https://github.com/wohlert/keras-squeezenet
    #             https://github.com/DT42/squeezenet_demo/blob/master/model.py
    with tf.variable_scope("squeezenet", reuse=reuse):
        with tf.variable_scope("input"):
            n = InputLayer(x)
            # n = Conv2d(n, 96, (7,7),(2,2),tf.nn.relu,'SAME',name='conv1')
            n = Conv2d(n, 64, (3, 3), (2, 2), tf.nn.relu, 'SAME', name='conv1')
            n = MaxPool2d(n, (3, 3), (2, 2), 'VALID', name='max')

        with tf.variable_scope("fire2"):
            n = Conv2d(n,
                       16, (1, 1), (1, 1),
                       tf.nn.relu,
                       'SAME',
                       name='squeeze1x1')
            n1 = Conv2d(n,
                        64, (1, 1), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand1x1')
            n2 = Conv2d(n,
                        64, (3, 3), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand3x3')
            n = ConcatLayer([n1, n2], -1, name='concat')

        with tf.variable_scope("fire3"):
            n = Conv2d(n,
                       16, (1, 1), (1, 1),
                       tf.nn.relu,
                       'SAME',
                       name='squeeze1x1')
            n1 = Conv2d(n,
                        64, (1, 1), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand1x1')
            n2 = Conv2d(n,
                        64, (3, 3), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand3x3')
            n = ConcatLayer([n1, n2], -1, name='concat')
            n = MaxPool2d(n, (3, 3), (2, 2), 'VALID', name='max')

        with tf.variable_scope("fire4"):
            n = Conv2d(n,
                       32, (1, 1), (1, 1),
                       tf.nn.relu,
                       'SAME',
                       name='squeeze1x1')
            n1 = Conv2d(n,
                        128, (1, 1), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand1x1')
            n2 = Conv2d(n,
                        128, (3, 3), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand3x3')
            n = ConcatLayer([n1, n2], -1, name='concat')

        with tf.variable_scope("fire5"):
            n = Conv2d(n,
                       32, (1, 1), (1, 1),
                       tf.nn.relu,
                       'SAME',
                       name='squeeze1x1')
            n1 = Conv2d(n,
                        128, (1, 1), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand1x1')
            n2 = Conv2d(n,
                        128, (3, 3), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand3x3')
            n = ConcatLayer([n1, n2], -1, name='concat')
            n = MaxPool2d(n, (3, 3), (2, 2), 'VALID', name='max')

        with tf.variable_scope("fire6"):
            n = Conv2d(n,
                       48, (1, 1), (1, 1),
                       tf.nn.relu,
                       'SAME',
                       name='squeeze1x1')
            n1 = Conv2d(n,
                        192, (1, 1), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand1x1')
            n2 = Conv2d(n,
                        192, (3, 3), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand3x3')
            n = ConcatLayer([n1, n2], -1, name='concat')

        with tf.variable_scope("fire7"):
            n = Conv2d(n,
                       48, (1, 1), (1, 1),
                       tf.nn.relu,
                       'SAME',
                       name='squeeze1x1')
            n1 = Conv2d(n,
                        192, (1, 1), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand1x1')
            n2 = Conv2d(n,
                        192, (3, 3), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand3x3')
            n = ConcatLayer([n1, n2], -1, name='concat')

        with tf.variable_scope("fire8"):
            n = Conv2d(n,
                       64, (1, 1), (1, 1),
                       tf.nn.relu,
                       'SAME',
                       name='squeeze1x1')
            n1 = Conv2d(n,
                        256, (1, 1), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand1x1')
            n2 = Conv2d(n,
                        256, (3, 3), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand3x3')
            n = ConcatLayer([n1, n2], -1, name='concat')

        with tf.variable_scope("fire9"):
            n = Conv2d(n,
                       64, (1, 1), (1, 1),
                       tf.nn.relu,
                       'SAME',
                       name='squeeze1x1')
            n1 = Conv2d(n,
                        256, (1, 1), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand1x1')
            n2 = Conv2d(n,
                        256, (3, 3), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand3x3')
            n = ConcatLayer([n1, n2], -1, name='concat')

        with tf.variable_scope("output"):
            n = DropoutLayer(n,
                             keep=0.5,
                             is_fix=True,
                             is_train=is_train,
                             name='drop1')
            n = Conv2d(n, 1000, (1, 1), (1, 1), padding='VALID',
                       name='conv10')  # 13, 13, 1000
            n = GlobalMeanPool2d(n)
        return n
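A minimal graph-mode (TL 1.x) usage sketch (an assumption, not part of the original example):

x = tf.placeholder(tf.float32, [None, 224, 224, 3])
net = squeezenet(x, is_train=False)
probs = tf.nn.softmax(net.outputs)  # [batch, 1000]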