Example #1
    def get_model(inputs_shape):
        ni = Input(inputs_shape)
        nn = Dropout(keep=0.8)(ni)
        nn = Dense(n_units=800, act=tf.nn.relu)(nn)
        nn = Dropout(keep=0.8)(nn)
        nn = Dense(n_units=800, act=tf.nn.relu)(nn)

        # FIXME: currently assumes the inputs and outputs are both single Layers; they can also be lists.
        M_hidden = Model(inputs=ni, outputs=nn, name="mlp_hidden")

        nn = Dropout(keep=0.8)(M_hidden.as_layer())
        nn = Dense(n_units=10, act=tf.nn.relu)(nn)
        return Model(inputs=ni, outputs=nn, name="mlp")
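A minimal usage sketch for the model above (assumptions: TensorLayer 2.x with the Input/Dense/Dropout/Model imports used throughout these examples; the 784-dim input and batch size 8 are illustrative only, not taken from the original code):

    import numpy as np

    MLP = get_model([None, 784])
    x = np.random.random([8, 784]).astype(np.float32)
    y = MLP(x, is_train=False)  # inference mode: Dropout layers are disabled
    print(y.shape)              # (8, 10)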
Example #2
    def __init__(self, shape, n_features):
        strategy = tf.distribute.MirroredStrategy()
        with strategy.scope():
            image_ni, image_nn = self.encoder(shape, n_features)
            self.image_encoder = Model(inputs=image_ni,
                                       outputs=image_nn,
                                       name="image_encoder")

            self.pose_encoder = self.get_pose_encoder(shape, n_features)

            decoder_ni, decoder_nn = self.decoder(
                (None, shape[1] // 8, shape[2] // 8, None))
            self.image_decoder = Model(inputs=decoder_ni,
                                       outputs=decoder_nn,
                                       name="image_decoder")
Example #3
    def test_ModelLayer(self):
        print('-' * 20, 'ModelLayer', '-' * 20)

        def MyModel():
            nii = Input(shape=[None, 100])
            nn = Dense(50, in_channels=100)(nii)
            nn = Dropout(0.9)(nn)
            nn = Dense(10)(nn)
            M = Model(inputs=nii, outputs=nn)
            return M

        mlayer = MyModel().as_layer()

        ni = Input(shape=[None, 100])
        nn = mlayer(ni)
        nn = Dense(5)(nn)
        net = Model(inputs=ni, outputs=nn)

        self.assertEqual(net._nodes_fixed, True)

        data = np.random.normal(size=[4, 100]).astype(np.float32)
        out = net(data, is_train=False)

        self.assertEqual(net._nodes_fixed, True)
        self.assertEqual(net.all_layers[1]._nodes_fixed, True)
        self.assertEqual(net.all_layers[1].model._nodes_fixed, True)
        self.assertEqual(net.all_layers[1].model.all_layers[0]._nodes_fixed, True)
Example #4
 def MyModel():
     nii = Input(shape=[None, 100])
     nn = Dense(50, in_channels=100)(nii)
     nn = Dropout(0.9)(nn)
     nn = Dense(10)(nn)
     M = Model(inputs=nii, outputs=nn)
     return M
Example #5
File: model.py Project: se7enXF/TAM_Net
    def tam_net(self):
        inputs = Input(self.in_shape, name='inputs')

        e_in = inputs
        for i in range(0, 5):
            e_out = Conv2d(self.f_size * (2**i), (3, 3), (2, 2),
                           act=tf.nn.relu,
                           name=f'e{i+1}_con')(e_in)
            e_in = self.residual_block(i, e=True)(e_out)
            self.__setattr__(f'e{i+1}', e_in)

        d_in = e_in
        for i in range(4, 0, -1):
            d_out = DeConv2d(self.f_size * (2**(i - 1)), (3, 3), (2, 2),
                             name=f'd{i}_con')(d_in)
            encoder = self.__getattribute__(f'e{i}')
            d_out = Concat(concat_dim=3, name=f'concat{i}')([encoder, d_out])
            d_out = Conv2d(self.f_size * (2**(i - 1)), (1, 1), (1, 1),
                           name=f'fusion{i}')(d_out)
            d_in = self.residual_block(i - 1, e=False)(d_out)
            self.__setattr__(f'd{i + 1}', d_in)

        outs = DeConv2d(3, (3, 3), (2, 2), name='d_con_out')(d_in)
        outs = Conv2d(3, (1, 1), (1, 1), act=tf.nn.sigmoid, name='outs')(outs)
        return Model(inputs=inputs, outputs=outs, name="TAM_Net")
Example #6
def get_Ec(x_shape=(None, flags.img_size_h, flags.img_size_w, flags.c_dim),
           name=None):
    # ref: Multimodal Unsupervised Image-to-Image Translation
    lrelu = lambda x: tl.act.lrelu(x, 0.01)
    w_init = tf.random_normal_initializer(stddev=0.02)
    g_init = tf.random_normal_initializer(1., 0.02)  # gamma initializer; assumed, since it is not defined in this excerpt
    channel = 64
    ni = Input(x_shape)
    n = Conv2d(channel, (7, 7), (1, 1), act=lrelu, W_init=w_init)(ni)
    for i in range(2):
        n = Conv2d(channel * 2, (3, 3), (2, 2), W_init=w_init)(n)
        n = InstanceNorm2d(act=tf.nn.relu, gamma_init=g_init)(n)
        channel = channel * 2

    for i in range(1, 5):
        # res block
        nn = Conv2d(channel, (3, 3), (1, 1),
                    act=None,
                    W_init=w_init,
                    b_init=None)(n)
        nn = InstanceNorm2d(act=tf.nn.relu, gamma_init=g_init)(nn)
        nn = Conv2d(channel, (3, 3), (1, 1),
                    act=None,
                    W_init=w_init,
                    b_init=None)(nn)
        nn = InstanceNorm2d(act=None, gamma_init=g_init)(nn)
        n = Elementwise(tf.add)([n, nn])

    n = GaussianNoise(is_always=False)(n)

    M = Model(inputs=ni, outputs=n, name=name)
    return M
Example #7
def get_D(x_shape=(None, flags.img_size_h, flags.img_size_w, flags.c_dim),
          name=None):
    # ref: Image-to-Image Translation with Conditional Adversarial Networks
    # input: (batch_size_train, 256, 256, 3)
    # output: (batch_size_train, )
    ch = 64
    n_layer = 8
    tch = ch
    lrelu = lambda x: tl.act.lrelu(x, 0.2)  # assumed; not defined in this excerpt
    w_init = tf.random_normal_initializer(stddev=0.02)  # assumed; not defined in this excerpt
    ni = Input(x_shape)
    n = SpectralNormConv2d(ch, (3, 3), (2, 2), act=lrelu, W_init=w_init)(ni)
    for i in range(1, n_layer - 1):
        n = SpectralNormConv2d(tch * 2, (3, 3), (2, 2),
                               act=lrelu,
                               W_init=w_init)(n)
        tch *= 2
    n = SpectralNormConv2d(tch * 2, (3, 3), (2, 2), act=lrelu,
                           W_init=w_init)(n)
    tch *= 2
    n = SpectralNormConv2d(1, (1, 1), (1, 1),
                           act=None,
                           padding='VALID',
                           W_init=w_init)(n)
    n = Reshape([-1, 1])(n)
    M = Model(inputs=ni, outputs=n, name=name)
    return M
Example #8
def model(input_shape, n_classes):
    in_net = Input(shape=input_shape, name='input')

    net = Conv2d(64, (5, 5), (1, 1),
                 act=tf.nn.relu,
                 padding='SAME',
                 name='cnn1')(in_net)
    net = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool1')(net)
    net = LocalResponseNorm(4, 1.0, 0.001 / 9.0, 0.75, name='norm1')(net)

    net = TernaryConv2d(64, (5, 5), (1, 1),
                        act=tf.nn.relu,
                        padding='SAME',
                        name='cnn2')(net)
    net = LocalResponseNorm(4, 1.0, 0.001 / 9.0, 0.75, name='norm2')(net)
    net = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool2')(net)

    net = Flatten(name='flatten')(net)

    net = TernaryDense(384, act=tf.nn.relu, name='d1relu')(net)
    net = TernaryDense(192, act=tf.nn.relu, name='d2relu')(net)
    net = Dense(n_classes, act=None, name='output')(net)

    net = Model(inputs=in_net, outputs=net, name='dorefanet')
    return net
Example #9
        def get_model(inputs_shape):
            ni = Input(inputs_shape)

            ## 1. Localisation network
            # use MLP as the localisation net
            nn = Flatten()(ni)
            nn = Dense(n_units=20, act=tf.nn.tanh)(nn)
            nn = Dropout(keep=0.8)(nn)
            # you can also use a CNN instead of an MLP as the localisation net

            ## 2. Spatial transformer module (sampler)
            stn = SpatialTransformer2dAffine(out_size=(40, 40), in_channels=20)
            # s = stn((nn, ni))
            nn = stn((nn, ni))
            s = nn

            ## 3. Classifier
            nn = Conv2d(16, (3, 3), (2, 2), act=tf.nn.relu, padding='SAME')(nn)
            nn = Conv2d(16, (3, 3), (2, 2), act=tf.nn.relu, padding='SAME')(nn)
            nn = Flatten()(nn)
            nn = Dense(n_units=1024, act=tf.nn.relu)(nn)
            nn = Dense(n_units=10, act=tf.identity)(nn)

            M = Model(inputs=ni, outputs=[nn, s])
            return M
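A minimal usage sketch for the two-output model above (assumptions: a single-channel 40x40 input, i.e. inputs_shape = [None, 40, 40, 1]; these numbers are illustrative and not taken from the original code):

    import numpy as np

    net = get_model([None, 40, 40, 1])
    x = np.random.random([4, 40, 40, 1]).astype(np.float32)
    logits, transformed = net(x, is_train=False)  # outputs are returned in the order given by outputs=[nn, s]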
Example #10
def model(inputs_shape, n_class=10):
    in_net = Input(inputs_shape, name='input')
    net = DorefaConv2d(1,
                       3,
                       32, (5, 5), (1, 1),
                       padding='SAME',
                       b_init=None,
                       name='bcnn1')(in_net)
    net = MaxPool2d((2, 2), (2, 2), padding='SAME', name='pool1')(net)
    net = BatchNorm(act=tl.act.htanh, name='bn1')(net)

    net = DorefaConv2d(1,
                       3,
                       64, (5, 5), (1, 1),
                       padding='SAME',
                       b_init=None,
                       name='bcnn2')(net)
    net = MaxPool2d((2, 2), (2, 2), padding='SAME', name='pool2')(net)
    net = BatchNorm(act=tl.act.htanh, name='bn2')(net)

    net = Flatten('flatten')(net)
    net = DorefaDense(1, 3, 256, b_init=None, name='dense')(net)
    net = BatchNorm(act=tl.act.htanh, name='bn3')(net)

    net = Dense(n_class, b_init=None, name='bout')(net)
    net = BatchNorm(name='bno')(net)
    net = Model(inputs=in_net, outputs=net, name='dorefanet')
    return net
Example #11
        def get_model(input_shape):
            ni = Input(input_shape)

            nii = Conv2d(32, filter_size=(3, 3), strides=(1, 1),
                         name='conv1')(ni)
            nn = Dropout(keep=0.9, name='drop1')(nii)

            conv = Conv2d(32, filter_size=(3, 3), strides=(1, 1), name='conv2')
            tt = conv(nn)  # conv2_node_0
            nn = conv(nn)  # conv2_node_1

            # a branch
            na = Conv2d(64, filter_size=(3, 3), strides=(1, 1),
                        name='conv3')(nn)
            na = MaxPool2d(name='pool1')(na)

            # b branch
            nb = MaxPool2d(name='pool2')(nn)
            nb = conv(nb)  # conv2_node_2

            out = Concat(name='concat')([na, nb])
            M = Model(inputs=ni, outputs=[out, nn, nb])

            gg = conv(nii)  # this node will not be added since the model is already fixed

            return M
Example #12
def model(inputs_shape, n_class=10):
    # In a BNN, the inputs to all layers are binary, except for the first layer.
    # ref: https://github.com/itayhubara/BinaryNet.tf/blob/master/models/BNN_cifar10.py
    net_in = Input(inputs_shape, name='input')
    net = BinaryConv2d(32, (5, 5), (1, 1),
                       padding='SAME',
                       b_init=None,
                       name='bcnn1')(net_in)
    net = MaxPool2d((2, 2), (2, 2), padding='SAME', name='pool1')(net)
    net = BatchNorm(act=tl.act.htanh, name='bn1')(net)

    net = Sign("sign1")(net)
    net = BinaryConv2d(64, (5, 5), (1, 1),
                       padding='SAME',
                       b_init=None,
                       name='bcnn2')(net)
    net = MaxPool2d((2, 2), (2, 2), padding='SAME', name='pool2')(net)
    net = BatchNorm(act=tl.act.htanh, name='bn2')(net)

    net = Flatten('ft')(net)
    net = Sign("sign2")(net)
    net = BinaryDense(256, b_init=None, name='dense')(net)
    net = BatchNorm(act=tl.act.htanh, name='bn3')(net)

    net = Sign("sign3")(net)
    net = BinaryDense(10, b_init=None, name='bout')(net)
    net = BatchNorm(name='bno')(net)
    net = Model(inputs=net_in, outputs=net, name='binarynet')
    return net
Example #13
def model(inputs_shape, n_class=10):
    net_in = Input(inputs_shape, name="input")

    net = QuanConv2dWithBN(
        n_filter=32, filter_size=(5, 5), strides=(1, 1), padding='SAME', act=tl.nn.relu, name='qconvbn1'
    )(net_in)
    net = MaxPool2d(filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool1')(net)

    net = QuanConv2dWithBN(
        n_filter=64, filter_size=(5, 5), strides=(1, 1), padding='SAME', act=tl.nn.relu, name='qconvbn2'
    )(net)
    net = MaxPool2d(filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool2')(net)

    net = Flatten(name='ft')(net)

    # net = QuanDense(256, act="relu", name='qdbn')(net)
    # net = QuanDense(n_class, name='qdbn_out')(net)

    net = QuanDenseLayerWithBN(256, act="relu", name='qdbn')(net)
    net = QuanDenseLayerWithBN(n_class, name='qdbn_out')(net)

    # net = Dense(256, act='relu', name='Dense1')(net)
    # net = Dense(n_class, name='Dense2')(net)

    net = Model(inputs=net_in, outputs=net, name='quan')
    return net
Example #14
def Discriminator(input_shape, prefix=""):
    lrelu = lambda x: tl.act.lrelu(x, 0.2)  # assumed; not defined in this excerpt
    I = Input(input_shape)
    D = Conv2d(64, (4, 4), (2, 2),
               padding='SAME',
               act=lrelu,
               b_init=None,
               name=prefix + 'D_conv_1')(I)
    D = InstanceNorm2d(act=lrelu)(Conv2d(128, (4, 4), (2, 2),
                                         padding='SAME',
                                         b_init=None,
                                         name=prefix + 'D_conv_2')(D))
    D = InstanceNorm2d(act=lrelu)(Conv2d(256, (4, 4), (2, 2),
                                         padding='SAME',
                                         b_init=None,
                                         name=prefix + 'D_conv_3')(D))
    D = InstanceNorm2d(act=lrelu)(Conv2d(512, (4, 4), (2, 2),
                                         padding='SAME',
                                         b_init=None,
                                         name=prefix + 'D_conv_4')(D))
    D = InstanceNorm2d(act=lrelu)(Conv2d(512, (4, 4), (2, 2),
                                         padding='SAME',
                                         b_init=None,
                                         name=prefix + 'D_conv_5')(D))
    D = InstanceNorm2d(act=lrelu)(Conv2d(512, (4, 4), (2, 2),
                                         padding='SAME',
                                         b_init=None,
                                         name=prefix + 'D_conv_6')(D))
    D = Conv2d(1, (4, 4), (1, 1), name=prefix + 'D_conv_7')(D)
    D = GlobalMeanPool2d()(D)
    D_net = Model(inputs=I, outputs=D, name=prefix + 'Discriminator')
    return D_net
Example #15
def create_model(inputs_shape):
    W_init = tl.initializers.truncated_normal(stddev=5e-2)
    W_init2 = tl.initializers.truncated_normal(stddev=0.04)
    ni = Input(inputs_shape)
    nn = Conv2d(64, (3, 3), (1, 1), padding='SAME', act=tf.nn.relu, W_init=W_init, name='conv1_1')(ni)
    nn = MaxPool2d((2, 2), (2, 2), padding='SAME', name='pool1_1')(nn)
    nn = Conv2d(64, (3, 3), (1, 1), padding='SAME', act=tf.nn.relu, W_init=W_init, b_init=None, name='conv1_2')(nn)
    nn = MaxPool2d((2, 2), (2, 2), padding='SAME', name='pool1_2')(nn)

    nn = Conv2d(128, (3, 3), (1, 1), padding='SAME', act=tf.nn.relu, W_init=W_init, b_init=None, name='conv2_1')(nn)
    nn = MaxPool2d((2, 2), (2, 2), padding='SAME', name='pool2_1')(nn)
    nn = Conv2d(128, (3, 3), (1, 1), padding='SAME', act=tf.nn.relu, W_init=W_init, b_init=None, name='conv2_2')(nn)
    nn = MaxPool2d((2, 2), (2, 2), padding='SAME', name='pool2_2')(nn)

    nn = Conv2d(256, (3, 3), (1, 1), padding='SAME', act=tf.nn.relu, W_init=W_init, b_init=None, name='conv3_1')(nn)
    nn = MaxPool2d((2, 2), (2, 2), padding='SAME', name='pool3_1')(nn)
    nn = Conv2d(256, (3, 3), (1, 1), padding='SAME', act=tf.nn.relu, W_init=W_init, b_init=None, name='conv3_2')(nn)
    nn = MaxPool2d((2, 2), (2, 2), padding='SAME', name='pool3_2')(nn)

    nn = Conv2d(512, (3, 3), (1, 1), padding='SAME', act=tf.nn.relu, W_init=W_init, b_init=None, name='conv4_1')(nn)
    nn = MaxPool2d((2, 2), (2, 2), padding='SAME', name='pool4_1')(nn)
    nn = Conv2d(512, (3, 3), (1, 1), padding='SAME', act=tf.nn.relu, W_init=W_init, b_init=None, name='conv4_2')(nn)
    nn = MaxPool2d((2, 2), (2, 2), padding='SAME', name='pool4_2')(nn)

    nn = Flatten(name='flatten')(nn)
    nn = Dense(1000, act=None, W_init=W_init2, name='output')(nn)

    M = Model(inputs=ni, outputs=nn, name='cnn')
    return M
Example #16
def get_G(name=None):
    gf_dim = 32
    w_init = tf.random_normal_initializer(stddev=0.02)

    nx = Input((flags.batch_size, 256, 256, 3))
    n = Conv2d(gf_dim, (7, 7), (1, 1), W_init=w_init)(nx)
    n = InstanceNorm2d(act=tf.nn.relu)(n)

    n = Conv2d(gf_dim * 2, (3, 3), (2, 2), W_init=w_init)(n)
    n = InstanceNorm2d(act=tf.nn.relu)(n)

    n = Conv2d(gf_dim * 4, (3, 3), (2, 2), W_init=w_init)(n)
    n = InstanceNorm2d(act=tf.nn.relu)(n)

    for i in range(9):
        _n = Conv2d(gf_dim * 4, (3, 3), (1, 1), W_init=w_init)(n)
        _n = InstanceNorm2d(act=tf.nn.relu)(_n)
        _n = Conv2d(gf_dim * 4, (3, 3), (1, 1), W_init=w_init)(_n)
        _n = InstanceNorm2d()(_n)
        _n = Elementwise(tf.add)([n, _n])
        n = _n

    n = DeConv2d(gf_dim * 2, (3, 3), (2, 2), W_init=w_init)(n)
    n = InstanceNorm2d(act=tf.nn.relu)(n)

    n = DeConv2d(gf_dim, (3, 3), (2, 2), W_init=w_init)(n)
    n = InstanceNorm2d(act=tf.nn.relu)(n)

    n = Conv2d(3, (7, 7), (1, 1), act=tf.nn.tanh, W_init=w_init)(n)

    M = Model(inputs=nx, outputs=n, name=name)
    return M
Example #17
def get_G(input_shape):
    w_init = tf.random_normal_initializer(stddev=0.02)
    g_init = tf.random_normal_initializer(1., 0.02)

    nin = Input(input_shape)
    n = Conv2d(64, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init)(nin)
    temp = n

    # B residual blocks
    for i in range(16):
        nn = Conv2d(64, (3, 3), (1, 1), padding='SAME', W_init=w_init, b_init=None)(n)
        nn = BatchNorm(act=tf.nn.relu, gamma_init=g_init)(nn)
        nn = Conv2d(64, (3, 3), (1, 1), padding='SAME', W_init=w_init, b_init=None)(nn)
        nn = BatchNorm(gamma_init=g_init)(nn)
        nn = Elementwise(tf.add)([n, nn])
        n = nn

    n = Conv2d(64, (3, 3), (1, 1), padding='SAME', W_init=w_init, b_init=None)(n)
    n = BatchNorm(gamma_init=g_init)(n)
    n = Elementwise(tf.add)([n, temp])
    # B residual blocks end

    n = Conv2d(256, (3, 3), (1, 1), padding='SAME', W_init=w_init)(n)
    n = SubpixelConv2d(scale=2, n_out_channels=None, act=tf.nn.relu)(n)

    n = Conv2d(256, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init)(n)
    n = SubpixelConv2d(scale=2, n_out_channels=None, act=tf.nn.relu)(n)

    nn = Conv2d(3, (1, 1), (1, 1), act=tf.nn.tanh, padding='SAME', W_init=w_init)(n)
    G = Model(inputs=nin, outputs=nn, name="generator")
    return G
Example #18
def hidden_model(inputs_shape):
    ni = Input(inputs_shape)
    nn = Dropout(keep=0.8)(ni)
    nn = Dense(n_units=800, act=tf.nn.relu)(nn)
    nn = Dropout(keep=0.8)(nn)
    nn = Dense(n_units=800, act=tf.nn.relu)(nn)

    return Model(inputs=ni, outputs=nn, name="mlp_hidden")
def get_model(inputs_shape, hmodel):
    hidden = hmodel.as_layer()
    ni = Input(inputs_shape)
    nn = hidden(ni)
    nn = Dropout(keep=0.8)(nn)
    nn = Dense(n_units=10, act=tf.nn.relu)(nn)

    return Model(inputs=ni, outputs=nn, name="mlp")
Example #20
def get_model(inputs_shape):
    ni = Input(inputs_shape)
    nn = Dropout(keep=0.8)(ni)
    nn = Dense(n_units=800, act=tf.nn.relu)(nn)
    nn = Dropout(keep=0.8)(nn)
    nn = Dense(n_units=800, act=tf.nn.relu)(nn)
    nn = Dropout(keep=0.8)(nn)
    nn = Dense(n_units=10, act=tf.nn.relu)(nn)
    M = Model(inputs=ni, outputs=nn, name="mlp")
    return M
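A minimal training-step sketch for the MLP above (assumptions: TensorFlow 2.x eager mode; the 784-dim input, random batch, and Adam optimizer are placeholders for illustration, not part of the original example):

    import numpy as np
    import tensorflow as tf

    MLP = get_model([None, 784])
    optimizer = tf.optimizers.Adam(learning_rate=1e-3)

    x_batch = np.random.random([32, 784]).astype(np.float32)
    y_batch = np.random.randint(0, 10, size=[32]).astype(np.int64)

    with tf.GradientTape() as tape:
        output = MLP(x_batch, is_train=True)  # training mode: Dropout layers are active
        loss = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y_batch, logits=output))
    grads = tape.gradient(loss, MLP.trainable_weights)
    optimizer.apply_gradients(zip(grads, MLP.trainable_weights))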
Example #21
def get_siamese_network(input_shape):
    """Create siamese network with shared base network as layer
    """
    base_layer = create_base_network(input_shape).as_layer()

    ni_1 = Input(input_shape)
    ni_2 = Input(input_shape)
    nn_1 = base_layer(ni_1)
    nn_2 = base_layer(ni_2)
    return Model(inputs=[ni_1, ni_2], outputs=[nn_1, nn_2])
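A minimal usage sketch for the siamese model above (assumptions: create_base_network from Example #24 is available and the inputs are 28x28 arrays; both the shape and the batch size are illustrative only):

    import numpy as np

    siamese = get_siamese_network([None, 28, 28])
    a = np.random.random([4, 28, 28]).astype(np.float32)
    b = np.random.random([4, 28, 28]).astype(np.float32)
    emb_a, emb_b = siamese([a, b], is_train=False)  # the two branches share the same base-network weights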
Example #22
def VGG_static(layer_type, batch_norm=False, end_with='outputs', name=None):
    ni = Input([None, 224, 224, 3])

    config = cfg[mapped_cfg[layer_type]]
    layers = make_layers(config, batch_norm, end_with)

    nn = layers(ni)

    M = Model(inputs=ni, outputs=nn, name=name)
    return M
Example #23
File: model.py Project: se7enXF/TAM_Net
 def residual_block(self, n_k=1, e=True):
     k_size = self.f_size * (2**n_k)
     ni = Input([None, None, None, k_size])
     nn = Conv2d(k_size, (3, 3), (1, 1))(ni)
     nn = BatchNorm(act=tf.nn.relu)(nn)
     nn = Conv2d(k_size, (3, 3), (1, 1))(nn)
     nn = BatchNorm()(nn)
     nn = Elementwise(tf.add)([ni, nn])
     return Model(inputs=ni,
                  outputs=nn,
                  name=f'{"e" if e else "d"}{n_k+1}_res').as_layer()
Example #24
def create_base_network(input_shape):
    '''Base network to be shared (eq. to feature extraction).
    '''
    input = Input(shape=input_shape)
    x = Flatten()(input)
    x = Dense(128, act=tf.nn.relu)(x)
    x = Dropout(0.9)(x)
    x = Dense(128, act=tf.nn.relu)(x)
    x = Dropout(0.9)(x)
    x = Dense(128, act=tf.nn.relu)(x)
    return Model(input, x)
Example #25
def get_dx_G(input_shape, u_net_blocks=2, refine=False, name="dx_generator"):

    nin = Input(input_shape)

    nn = u_net(nin, refine)
    for i in range(u_net_blocks - 1):
        nn = u_net(nn, refine)

    G = Model(inputs=nin, outputs=nn, name=name)

    return G
Example #26
def model(input_shape, n_classes, bitW, bitA):
    in_net = Input(shape=input_shape, name='input')
    net = QuanConv2dWithBN(64, (5, 5), (1, 1), act='relu', padding='SAME', bitW=bitW, bitA=bitA, name='qcnnbn1')(in_net)
    net = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool1')(net)
    net = QuanConv2dWithBN(64, (5, 5), (1, 1), padding='SAME', act='relu', bitW=bitW, bitA=bitA, name='qcnnbn2')(net)
    net = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool2')(net)
    net = Flatten(name='flatten')(net)
    net = QuanDense(384, act=tf.nn.relu, bitW=bitW, bitA=bitA, name='qd1relu')(net)
    net = QuanDense(192, act=tf.nn.relu, bitW=bitW, bitA=bitA, name='qd2relu')(net)
    net = Dense(n_classes, act=None, name='output')(net)
    net = Model(inputs=in_net, outputs=net, name='dorefanet')
    return net
Example #27
File: utils.py Project: paolomandica/AML
def get_D(input_shape):
    """Get a Discriminator model with randomly inizialized weights.

    Args:
        input_shape : tuple
            Input shape of the Input layer of the model.
    """
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)
    df_dim = 64
    def lrelu(x): return tl.act.lrelu(x, 0.2)

    nin = Input(input_shape)
    n = Conv2d(df_dim, (4, 4), (2, 2), act=lrelu,
               padding='SAME', W_init=w_init)(nin)

    n = Conv2d(df_dim * 2, (4, 4), (2, 2), padding='SAME',
               W_init=w_init, b_init=None)(n)
    n = BatchNorm2d(act=lrelu, gamma_init=gamma_init)(n)
    n = Conv2d(df_dim * 4, (4, 4), (2, 2), padding='SAME',
               W_init=w_init, b_init=None)(n)
    n = BatchNorm2d(act=lrelu, gamma_init=gamma_init)(n)
    n = Conv2d(df_dim * 8, (4, 4), (2, 2), padding='SAME',
               W_init=w_init, b_init=None)(n)
    n = BatchNorm2d(act=lrelu, gamma_init=gamma_init)(n)
    n = Conv2d(df_dim * 16, (4, 4), (2, 2), padding='SAME',
               W_init=w_init, b_init=None)(n)
    n = BatchNorm2d(act=lrelu, gamma_init=gamma_init)(n)
    n = Conv2d(df_dim * 32, (4, 4), (2, 2), padding='SAME',
               W_init=w_init, b_init=None)(n)
    n = BatchNorm2d(act=lrelu, gamma_init=gamma_init)(n)
    n = Conv2d(df_dim * 16, (1, 1), (1, 1), padding='SAME',
               W_init=w_init, b_init=None)(n)
    n = BatchNorm2d(act=lrelu, gamma_init=gamma_init)(n)
    n = Conv2d(df_dim * 8, (1, 1), (1, 1), padding='SAME',
               W_init=w_init, b_init=None)(n)
    nn = BatchNorm2d(gamma_init=gamma_init)(n)

    n = Conv2d(df_dim * 2, (1, 1), (1, 1), padding='SAME',
               W_init=w_init, b_init=None)(nn)
    n = BatchNorm2d(act=lrelu, gamma_init=gamma_init)(n)
    n = Conv2d(df_dim * 2, (3, 3), (1, 1), padding='SAME',
               W_init=w_init, b_init=None)(n)
    n = BatchNorm2d(act=lrelu, gamma_init=gamma_init)(n)
    n = Conv2d(df_dim * 8, (3, 3), (1, 1), padding='SAME',
               W_init=w_init, b_init=None)(n)
    n = BatchNorm2d(gamma_init=gamma_init)(n)
    n = Elementwise(combine_fn=tf.add, act=lrelu)([n, nn])

    n = Flatten()(n)
    no = Dense(n_units=1, W_init=w_init)(n)
    D = Model(inputs=nin, outputs=no)  # , name="discriminator"
    return D
Example #28
    def get_pose_encoder(self, shape, n_features):
        pose_ni, pose_nn = self.encoder(shape, n_features)
        W_init = tl.initializers.truncated_normal(stddev=1e-2)

        pose_nn = Conv2d(n_filter=n_features,
                         filter_size=(1, 1),
                         strides=(1, 1),
                         act=None,
                         W_init=W_init,
                         padding="SAME")(pose_nn)

        return Model(inputs=pose_ni, outputs=pose_nn, name="pose_encoder")
Example #29
def model_G2():  ##Phase2 Generator

    gamma_init = tf1.random_normal_initializer(1., 0.02)
    w_init = tf1.random_normal_initializer(stddev=0.02)
    fn = tf1.nn.relu

    ##	Input layers
    lr_image = Input(
        (None, 128, 128, 3))  ##	(batch_size, height, width, channel)
    hr_image = Input((None, 512, 512, 3))

    ## 	Feature extracting layers from LR image
    lr_feature_layer_1 = Conv2d(64, (3, 3), (1, 1),
                                act=fn,
                                padding='SAME',
                                W_init=w_init)(lr_image)  # Shape(1,256,256,64)
    lr_feature_layer_1 = BatchNorm2d(gamma_init=gamma_init)(lr_feature_layer_1)

    lr_feature_layer_2 = SubpixelConv2d(scale=4, act=fn)(
        lr_feature_layer_1)  # Shape(1,256,256,16)

    ##	Feature extracting layers from HR image

    hr_feature_layer_1 = Conv2d(64, (3, 3), (1, 1),
                                act=fn,
                                padding='SAME',
                                W_init=w_init)(hr_image)  # Shape(1,256,256,64)
    hr_feature_layer_1 = BatchNorm2d(gamma_init=gamma_init)(hr_feature_layer_1)

    ##	Features Merging layers

    merge_layer = Concat(concat_dim=-1)(
        [lr_feature_layer_2, hr_feature_layer_1])  # Shape(1,256,256,128)

    non_linearity_layer_1 = Conv2d(64, (5, 5), (1, 1),
                                   act=fn,
                                   padding='SAME',
                                   W_init=w_init)(
                                       merge_layer)  # Shape(1,256,256,256)
    non_linearity_layer_1 = BatchNorm2d(
        gamma_init=gamma_init)(non_linearity_layer_1)

    ## 	Reconstruction layers
    Recon_layer_1 = Conv2d(3, (5, 5), (1, 1),
                           act=fn,
                           padding='SAME',
                           W_init=w_init)(
                               non_linearity_layer_1)  # Shape(1,256,256,1)
    Recon_layer_2 = Elementwise(combine_fn=tf1.add)([Recon_layer_1, hr_image
                                                     ])  # Shape(1,256,256,1)

    return Model(inputs=[lr_image, hr_image], outputs=Recon_layer_2)
Example #30
def VGG_static(layer_type, batch_norm=False, end_with='outputs', name=None):
    ni = Input([None, 224, 224, 3])
    n = Lambda(
        lambda x: x * 255 - np.array([123.68, 116.779, 103.939], dtype=np.float32).reshape([1, 1, 1, 3]), name='scale'
    )(ni)

    config = cfg[mapped_cfg[layer_type]]
    layers = make_layers(config, batch_norm, end_with)

    nn = layers(n)

    M = Model(inputs=ni, outputs=nn, name=name)
    return M