# Example #1
def get_discriminator(latent_shape, image_shape, df_dim=64):
    """Build a static discriminator model that scores an (image, latent) pair.

    The image branch is a four-stage strided-convolution stack (filters doubled
    at each stage, batch-norm + dropout after the first stage); the latent
    branch is a single dense projection with dropout.  Both branches are
    concatenated and reduced to a single logit.

    Parameters
    ----------
    latent_shape : shape of the latent-code input.
    image_shape : shape of the image input.
    df_dim : base number of convolution filters (default 64).
    """
    weight_init = tf.random_normal_initializer(stddev=0.02)
    bn_gamma_init = tf.random_normal_initializer(1., 0.02)
    leaky = lambda x: tf.nn.leaky_relu(x, 0.2)

    # --- image branch: conv stack ending in a flat feature vector ---
    img_in = Input(image_shape)
    feat = Conv2d(df_dim, (5, 5), (2, 2), act=leaky, W_init=weight_init)(img_in)
    for mult in (2, 4, 8):
        feat = Conv2d(df_dim * mult, (5, 5), (2, 2), W_init=weight_init, b_init=None)(feat)
        feat = BatchNorm2d(decay=0.9, act=leaky, gamma_init=bn_gamma_init)(feat)
        feat = Dropout(keep=0.8)(feat)
    feat = Flatten()(feat)  # [-1, 4*4*df_dim*8]

    # --- latent branch: dense projection to the same width as the image features ---
    z_in = Input(latent_shape)
    z_feat = Dense(n_units=4 * 4 * df_dim * 8, W_init=weight_init, b_init=None)(z_in)
    z_feat = Dropout(keep=0.8)(z_feat)

    # --- merge both branches and emit a single logit ---
    joined = Concat()([feat, z_feat])
    logits = Dense(n_units=1, W_init=weight_init, b_init=None)(joined)

    return tl.models.Model(inputs=[img_in, z_in], outputs=logits, name='discriminator')
def hidden_model(inputs_shape):
    """Build a two-layer ReLU MLP (800 -> 800) with dropout before each dense layer."""
    net_in = Input(inputs_shape)
    net = net_in
    # two identical (dropout, dense-800) stages
    for _ in range(2):
        net = Dropout(keep=0.8)(net)
        net = Dense(n_units=800, act=tf.nn.relu)(net)

    return Model(inputs=net_in, outputs=net, name="mlp_hidden")
# Example #3
    def __init__(self):
        """Declare the MLP layers: dropout -> 800 -> dropout -> 800 -> dropout -> 10."""
        super(CustomModel, self).__init__()

        # Layers are declared eagerly here; they are wired together in forward().
        self.dropout1 = Dropout(keep=0.8)
        self.dense1 = Dense(n_units=800, in_channels=784, act=tf.nn.relu)
        self.dropout2 = Dropout(keep=0.8)
        self.dense2 = Dense(n_units=800, in_channels=800, act=tf.nn.relu)
        self.dropout3 = Dropout(keep=0.8)
        self.dense3 = Dense(n_units=10, in_channels=800, act=tf.nn.relu)
 def __init__(self):
     """Declare an input dropout, a LayerList holding the two hidden layers, and an output dropout."""
     super(CustomModelHidden, self).__init__()
     # dropout applied to the raw input
     self.dropout1 = Dropout(keep=0.8)
     # hidden stack: 784 -> 800 -> dropout -> 800
     self.seq = LayerList([
         Dense(n_units=800, in_channels=784, act=tf.nn.relu),
         Dropout(keep=0.8),
         Dense(n_units=800, in_channels=800, act=tf.nn.relu),
     ])
     # dropout applied after the hidden stack
     self.dropout3 = Dropout(keep=0.8)
# Example #5
def get_model(inputs_shape):
    """Build a 784 -> 800 -> 800 -> 10 static MLP with dropout before every dense layer."""
    net_in = Input(inputs_shape)
    net = net_in
    # each stage: dropout followed by a ReLU dense layer of the given width
    for units in (800, 800, 10):
        net = Dropout(keep=0.8)(net)
        net = Dense(n_units=units, act=tf.nn.relu)(net)
    return Model(inputs=net_in, outputs=net, name="mlp")
# Example #6
def create_base_network(input_shape):
    '''Base network to be shared (eq. to feature extraction).

    Flattens the input, then applies three 128-unit ReLU dense layers with
    dropout between them, returning the stack as a Model.

    NOTE(review): `Dropout(0.9)` passes 0.9 as the layer's first positional
    argument — presumably the keep probability; verify against the Dropout API.
    '''
    # Renamed local from `input` to avoid shadowing the `input` builtin.
    net_in = Input(shape=input_shape)
    x = Flatten()(net_in)
    x = Dense(128, act=tf.nn.relu)(x)
    x = Dropout(0.9)(x)
    x = Dense(128, act=tf.nn.relu)(x)
    x = Dropout(0.9)(x)
    x = Dense(128, act=tf.nn.relu)(x)
    return Model(net_in, x)
# Example #7
    def get_model(inputs_shape):
        """Build an MLP whose hidden part is first wrapped in its own Model and reused as a layer."""
        net_in = Input(inputs_shape)
        net = net_in
        # hidden part: two (dropout, dense-800) stages
        for _ in range(2):
            net = Dropout(keep=0.8)(net)
            net = Dense(n_units=800, act=tf.nn.relu)(net)

        # FIXME: currently assume the inputs and outputs are both Layer. They can be lists.
        hidden_part = Model(inputs=net_in, outputs=net, name="mlp_hidden")

        # reuse the hidden sub-model as a single layer inside the outer classifier
        net = Dropout(keep=0.8)(hidden_part.as_layer())
        net = Dense(n_units=10, act=tf.nn.relu)(net)
        return Model(inputs=net_in, outputs=net, name="mlp")
# Example #8
def get_model(inputs_shape):
    """Build a 784 -> 800 -> 800 -> 10 MLP with explicit `in_channels` on each dense layer."""
    net_in = Input(inputs_shape)
    net = net_in
    # (n_units, in_channels) per stage; in_channels is optional in this case
    # as it can be inferred from the previous layer.
    for n_units, in_ch in ((800, 784), (800, 800), (10, 800)):
        net = Dropout(keep=0.8)(net)
        net = Dense(n_units=n_units, act=tf.nn.relu, in_channels=in_ch)(net)
    return Model(inputs=net_in, outputs=net, name="mlp")
def get_model(inputs_shape, hmodel):
    """Stack a dropout + 10-unit ReLU classifier head on top of a pre-built hidden model.

    Parameters
    ----------
    inputs_shape : shape of the network input.
    hmodel : a Model providing the hidden feature extractor (used via as_layer()).
    """
    hidden_layer = hmodel.as_layer()
    net_in = Input(inputs_shape)
    net = hidden_layer(net_in)
    net = Dropout(keep=0.8)(net)
    net = Dense(n_units=10, act=tf.nn.relu)(net)

    return Model(inputs=net_in, outputs=net, name="mlp")
# Example #10
def SqueezeNetV1(pretrained=False, end_with='out', name=None):
    """Pre-trained SqueezeNetV1 model (static mode). Input shape [?, 224, 224, 3], value range [0, 1].

    Parameters
    ------------
    pretrained : boolean
        Whether to load pretrained weights. Default False.
    end_with : str
        The end point of the model [conv1, maxpool1, fire2, fire3, fire4, ..., out]. Default ``out`` i.e. the whole model.
    name : None or str
        Name for this model.

    Examples
    ---------
    Classify ImageNet classes, see `tutorial_models_squeezenetv1.py <https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_models_squeezenetv1.py>`__

    >>> # get the whole model
    >>> squeezenet = tl.models.SqueezeNetV1(pretrained=True)
    >>> # use for inferencing
    >>> output = squeezenet(img1, is_train=False)
    >>> prob = tf.nn.softmax(output)[0].numpy()

    Extract features and Train a classifier with 100 classes

    >>> # get model without the last layer
    >>> cnn = tl.models.SqueezeNetV1(pretrained=True, end_with='drop1').as_layer()
    >>> # add one more layer and build new model
    >>> ni = Input([None, 224, 224, 3], name="inputs")
    >>> nn = cnn(ni)
    >>> nn = Conv2d(100, (1, 1), (1, 1), padding='VALID', name='conv10')(nn)
    >>> nn = GlobalMeanPool2d(name='globalmeanpool')(nn)
    >>> model = tl.models.Model(inputs=ni, outputs=nn)
    >>> # train your own classifier (only update the last layer)
    >>> train_params = model.get_layer('conv10').trainable_weights

    Returns
    -------
        static SqueezeNetV1.

    """
    ni = Input([None, 224, 224, 3], name="input")
    # Inputs arrive in [0, 1]; rescale to [0, 255] before the conv stack.
    n = Lambda(lambda x: x * 255, name='scale')(ni)

    # Idiomatic iteration: enumerate instead of range(len(...)).  The index is
    # still needed because fire blocks look up their filter counts by position
    # (offset by the two non-fire layers that precede them).
    for i, layer_name in enumerate(layer_names):
        if layer_name == 'conv1':
            n = Conv2d(64, (3, 3), (2, 2), tf.nn.relu, 'SAME', name='conv1')(n)
        elif layer_name == 'maxpool1':
            n = MaxPool2d((3, 3), (2, 2), 'VALID', name='maxpool1')(n)
        elif layer_name == 'drop1':
            n = Dropout(keep=0.5, name='drop1')(n)
        elif layer_name == 'out':
            n = Conv2d(1000, (1, 1), (1, 1), padding='VALID', name='conv10')(n)  # 13, 13, 1000
            n = GlobalMeanPool2d(name='globalmeanpool')(n)
        elif layer_name in ('fire3', 'fire5'):
            n = fire_block(n, n_filters[i - 2], max_pool=True, name=layer_name)
        else:
            n = fire_block(n, n_filters[i - 2], max_pool=False, name=layer_name)

        # Stop early when the requested end point has been built.
        if layer_name == end_with:
            break

    network = Model(inputs=ni, outputs=n, name=name)

    if pretrained:
        restore_params(network)

    return network
# Example #11
def u_net(inputs, refine=False):
    """U-Net generator: 8 strided-conv encoder stages (instance norm, leaky
    ReLU), a mirrored transposed-conv decoder with skip connections, and a
    1-channel tanh output.

    Parameters
    ----------
    inputs : the input layer/tensor fed to the encoder.
    refine : bool
        If True, add the input back onto the output through a ramp-clipped
        (v_min=-1) elementwise sum.
    """
    w_init = tf.random_normal_initializer(stddev=0.02)
    g_init = tf.random_normal_initializer(1., 0.02)
    lrelu = lambda x: tl.act.lrelu(x, 0.2)

    # ---- ENCODER: stride-2 conv + instance norm; keep every stage for the skips
    skips = []
    x = inputs
    for n_filter in (64, 128, 256, 512, 512, 512, 512, 512):
        x = Conv2d(n_filter, (4, 4), (2, 2), padding='SAME', W_init=w_init)(x)
        x = InstanceNorm2d(act=lrelu, gamma_init=g_init)(x)
        skips.append(x)

    # ---- DECODER: deconv (dropout on the three deepest stages only), then
    # concat the normalized result with the matching encoder stage.
    d = skips[-1]
    decoder_spec = (
        (512, True, skips[6]),
        (512, True, skips[5]),
        (512, True, skips[4]),
        (512, False, skips[3]),
        (256, False, skips[2]),
        (128, False, skips[1]),
        (64, False, skips[0]),
    )
    for n_filter, use_dropout, skip in decoder_spec:
        d = DeConv2d(n_filter=n_filter, filter_size=(4, 4))(d)
        if use_dropout:
            d = Dropout(0.5)(d)
        d = Concat()(
            [InstanceNorm2d(act=tf.nn.relu, gamma_init=g_init)(d), skip])

    # final upsampling stage has no skip connection
    d = DeConv2d(n_filter=64, filter_size=(4, 4))(d)
    d = InstanceNorm2d(act=tf.nn.relu, gamma_init=g_init)(d)

    out = Conv2d(1, (1, 1), (1, 1),
                 act=tf.nn.tanh,
                 padding='SAME',
                 W_init=w_init)(d)

    if refine:
        # residual refinement: add the input back, clipped from below at -1
        out = RampElementwise(tf.add, act=tl.act.ramp, v_min=-1)([out, inputs])

    return out