예제 #1
0
def build_discriminator(image, reuse=False):
    """Discriminator network: two strided conv blocks, then a sigmoid unit.

    Args:
        image: input image tensor (NHWC).
        reuse: when True, reuse the variables of a previously built copy.

    Returns:
        Per-image decision tensor in (0, 1) from the final 1-unit sigmoid
        dense layer.
    """
    with tf.variable_scope("discriminator") as scope:
        if reuse:
            tf.get_variable_scope().reuse_variables()

        # Block 1: stride-2 conv halves the spatial resolution.
        net = conv2d(inputs=image,
                     filters=64,
                     kernel_size=[5, 5],
                     strides=2,
                     padding="same",
                     activation=my_leaky_relu)
        net = batch_normalization(inputs=net)

        # Block 2: same pattern with twice the filters, plus dropout.
        net = conv2d(inputs=net,
                     filters=128,
                     kernel_size=[5, 5],
                     strides=2,
                     padding="same",
                     activation=my_leaky_relu)
        net = batch_normalization(inputs=net)
        net = dropout(inputs=net, rate=DROP_RATE)

        # Head: flatten and project to a single sigmoid decision.
        net = flatten(inputs=net)
        decision = dense(inputs=net, units=1, activation=tf.sigmoid)
        return decision
예제 #2
0
def f_net_dqn(inputs_state, inputs_action, is_training):
    """Q-value network: state runs through a BN + dense trunk, the action
    is concatenated before the second hidden layer, and a scalar Q value
    is produced per batch element.

    Args:
        inputs_state: state tensor; flattened to (batch, depth).
        inputs_action: action tensor; flattened and concatenated after
            the first hidden layer.
        is_training: flag/tensor forwarded to batch normalization.

    Returns:
        1-D tensor of Q values (squeezed from the final 1-unit layer).
    """
    state_depth = inputs_state.get_shape()[1:].num_elements()
    flat_state = tf.reshape(inputs_state, shape=[-1, state_depth])
    flat_state = layers.batch_normalization(flat_state, axis=1, training=is_training)
    h1 = layers.dense(
        inputs=flat_state, units=400,
        activation=None,
        kernel_regularizer=l2_regularizer(scale=1e-2),
        trainable=True, name='hidden1',
    )
    # First hidden layer: batch norm before the ReLU.
    h1 = tf.nn.relu(layers.batch_normalization(h1, axis=1, training=is_training))
    action_depth = inputs_action.get_shape()[1:].num_elements()
    flat_action = tf.reshape(inputs_action, shape=[-1, action_depth])
    h1 = tf.concat([h1, flat_action], axis=1)
    h2 = layers.dense(
        inputs=h1, units=300,
        activation=None,
        kernel_regularizer=l2_regularizer(scale=1e-2),
        trainable=True, name='hidden2',
    )
    # Second hidden layer: plain ReLU, no batch norm.
    h2 = tf.nn.relu(h2)
    q = layers.dense(
        inputs=h2, units=1,
        activation=None,
        kernel_initializer=tf.random_uniform_initializer(minval=-3e-3, maxval=3e-3),
        kernel_regularizer=l2_regularizer(scale=1e-2),
        trainable=True, name='out',
    )
    return tf.squeeze(q, axis=1, name='out_sqz')
예제 #3
0
    def __call__(self):
        """Builds the DenseNet-style network.

        Returns:
            Output tensor: class logits when ``include_top`` is set,
            otherwise the globally average-pooled features.
        """
        x = conv2d(self.img_input, self.nb_filter, self.initial_kernel, kernel_initializer='he_normal', padding='same',
                   strides=self.initial_strides, use_bias=False, **self.conv_kwargs)

        if self.subsample_initial_block:
            x = batch_normalization(x, **self.bn_kwargs)
            x = tf.nn.relu(x)
            x = max_pooling2d(x, (3, 3), data_format=self.data_format, strides=(2, 2), padding='same')

        # Add dense blocks; nb_filter tracks the running (compressed)
        # channel count across blocks.
        nb_filter = self.nb_filter
        for block_idx in range(self.nb_dense_block - 1):
            with tf.variable_scope('denseblock_{}'.format(block_idx)):
                x, nb_filter = self._dense_block(x, self.nb_layers[block_idx], nb_filter)
                # add transition_block
                x = self._transition_block(x, nb_filter)
                nb_filter = int(nb_filter * self.compression)

        # The last dense_block does not have a transition_block.
        # BUGFIX: pass the running nb_filter, not the initial
        # self.nb_filter, so the final block continues from the filter
        # count accumulated by the loop above.
        x, nb_filter = self._dense_block(x, self.final_nb_layer, nb_filter)

        x = batch_normalization(x, **self.bn_kwargs)
        x = tf.nn.relu(x)

        x = GlobalAveragePooling2D(data_format=self.data_format)(x)

        if self.include_top:
            x = dense(x, self.nb_classes)

        return x
예제 #4
0
    def forward(self, X, momentum=0.5):
        """Discriminator forward pass: four conv blocks, then a dense logit.

        Args:
            X: input image batch (NHWC).
            momentum: batch-norm momentum.

        Returns:
            Per-sample logits; the activation is applied in the cost
            function.
        """
        # 1st layer: stride-2 conv halves the spatial resolution.
        z = conv2d(X, self.W1, [1, 2, 2, 1], padding="SAME")
        # add bias
        z = tf.nn.bias_add(z, self.b1)
        # activation function
        z = tf.nn.leaky_relu(z)

        # 2nd layer
        z = conv2d(z, self.W2, [1, 1, 1, 1], padding="SAME")
        z = tf.nn.bias_add(z, self.b2)
        z = batch_normalization(z, momentum=momentum)
        z = tf.nn.leaky_relu(z)

        # 3rd layer: stride-2 conv halves the resolution again.
        z = conv2d(z, self.W3, [1, 2, 2, 1], padding="SAME")
        z = tf.nn.bias_add(z, self.b3)
        z = batch_normalization(z, momentum=momentum)
        z = tf.nn.leaky_relu(z)

        # 4th layer
        z = conv2d(z, self.W4, [1, 1, 1, 1], padding="SAME")
        z = tf.nn.bias_add(z, self.b4)
        z = batch_normalization(z, momentum=momentum)
        z = tf.nn.leaky_relu(z)

        # Fully connected head (Keras layers).
        z = Flatten()(z)
        z = Dense(1)(z)
        # NOTE(review): Dense(1) carries its own bias by default, so adding
        # self.b5 here looks like a double bias — confirm intent.
        logits = tf.nn.bias_add(z, self.b5)
        return logits  # Activation function included in cost function
    def forward(self, X, momentum=0.5):
        """Discriminator forward pass with a manual dense head.

        Reshapes the flat input to an image batch, applies four conv
        blocks (batch norm + leaky ReLU on the last three), then projects
        the flattened 7*7*256 features to per-sample logits.
        """
        img = tf.reshape(X, [-1, self.img_rows, self.img_cols, self.channels])

        # Block 1: stride-2 conv, bias, leaky ReLU (no batch norm).
        h = tf.nn.bias_add(conv2d(img, self.W1, [1, 2, 2, 1], padding="SAME"), self.b1)
        h = tf.nn.leaky_relu(h)

        # Block 2: stride-1 conv with batch norm.
        h = tf.nn.bias_add(conv2d(h, self.W2, [1, 1, 1, 1], padding="SAME"), self.b2)
        h = batch_normalization(h, momentum=momentum)
        h = tf.nn.leaky_relu(h)

        # Block 3: stride-2 conv with batch norm.
        h = tf.nn.bias_add(conv2d(h, self.W3, [1, 2, 2, 1], padding="SAME"), self.b3)
        h = batch_normalization(h, momentum=momentum)
        h = tf.nn.leaky_relu(h)

        # Block 4: stride-1 conv with batch norm.
        h = tf.nn.bias_add(conv2d(h, self.W4, [1, 1, 1, 1], padding="SAME"), self.b4)
        h = batch_normalization(h, momentum=momentum)
        h = tf.nn.leaky_relu(h)

        # Flatten to (batch, 7*7*256) and project to logits.
        flat = tf.reshape(h, [-1, 7 * 7 * 256])
        logits = tf.nn.bias_add(tf.matmul(flat, self.W5), self.b5)
        return logits
예제 #6
0
파일: train.py 프로젝트: GuoYi0/infoGAN-tf
def generatorNet(name, inputs, is_training, use_batchNorm, reuse=None):
    """InfoGAN generator: two dense layers, a reshape to 7x7x128, and two
    stride-2 transposed convs producing a sigmoid image.

    Args:
        name: variable scope name.
        inputs: latent code tensor.
        is_training: forwarded to batch normalization.
        use_batchNorm: whether to insert BN after each hidden layer.
        reuse: variable-scope reuse flag.

    Returns:
        Generated image tensor with values in (0, 1).
    """
    layer_id = 0
    net = inputs
    with tf.variable_scope(name, reuse=reuse):
        net = layers.dense(net, 1024, None, name="dense_%d" % layer_id)
        if use_batchNorm:
            net = layers.batch_normalization(net, training=is_training, name="bn_%d" % layer_id)
        net = tf.nn.relu(net, "relu_%d" % layer_id)

        layer_id += 1
        net = layers.dense(net, 7 * 7 * 128, None, name="dense_%d" % layer_id)  # 6272
        if use_batchNorm:
            net = layers.batch_normalization(net, training=is_training, name="bn_%d" % layer_id)
        net = tf.nn.relu(net, "relu_%d" % layer_id)

        # Lay the dense features out as 7x7 spatial maps.
        net = tf.reshape(net, [-1, 7, 7, 128], name="reshape_%d" % layer_id)

        layer_id += 1
        net = layers.conv2d_transpose(net, 64, kernel_size=4, strides=2, padding="SAME", name="deconv_%d" % layer_id)
        if use_batchNorm:
            net = layers.batch_normalization(net, training=is_training, name="bn_%d" % layer_id)
        net = tf.nn.relu(net, "relu_%d" % layer_id)

        layer_id += 1
        net = layers.conv2d_transpose(net, 1, kernel_size=4, strides=2, padding="SAME", name="deconv_%d" % layer_id)
        net = tf.nn.sigmoid(net, "sigmoid_%d" % layer_id)

    return net
예제 #7
0
def res_block_2d(x, kernel_size, training, batch_norm=True):
    """Residual block: two same-padding convs that preserve the channel
    count, a PReLU between them, optional batch norm after each conv, and
    the input added back onto the result.

    Args:
        x: 4-D input tensor; the filter count is read from its channel axis.
        kernel_size: convolution kernel size.
        training: batch-norm training flag.
        batch_norm: whether to apply batch norm after each conv.

    Returns:
        x + F(x), the residual sum.
    """
    assert len(x.shape) == 4, "Input tensor must be 4-dimensional."

    n_channels = int(x.shape[3])

    out = ly.conv2d(inputs=x,
                    filters=n_channels,
                    kernel_size=kernel_size,
                    strides=1,
                    padding='same')
    if batch_norm:
        out = ly.batch_normalization(out, training=training)

    # Learned-slope activation between the two convolutions.
    out = k.layers.PReLU()(out)

    out = ly.conv2d(inputs=out,
                    filters=n_channels,
                    kernel_size=kernel_size,
                    strides=1,
                    padding='same')
    if batch_norm:
        out = ly.batch_normalization(out, training=training)

    return tf.add(x, out)
예제 #8
0
def res_block_2d(x, kernel_size, activation, training, batch_norm=True):
    """Residual block with a configurable activation.

    Args:
        x: 4-D input tensor; the filter count is read from its channel axis.
        kernel_size: convolution kernel size.
        activation: key into the module-level `nonlinear` mapping;
            lower-cased before lookup.
        training: batch-norm training flag.
        batch_norm: whether to apply batch norm after each conv.

    Returns:
        x + F(x), the residual sum.
    """
    assert len(x.shape) == 4, "Input tensor must be 4-dimensional."
    activation = activation.lower()

    filters = int(x.shape[3])

    y = ly.conv2d(inputs=x,
                  filters=filters,
                  kernel_size=kernel_size,
                  strides=1,
                  padding='same')

    if batch_norm:
        y = ly.batch_normalization(y, training=training)

    # Look up the nonlinearity by name from the module-level table.
    y = nonlinear[activation](y)

    y = ly.conv2d(inputs=y,
                  filters=filters,
                  kernel_size=kernel_size,
                  strides=1,
                  padding='same')

    if batch_norm:
        y = ly.batch_normalization(y, training=training)

    return tf.add(x, y)
예제 #9
0
    def forward(self, X, Y, momentum=0.5):
        """Conditional generator forward pass.

        Args:
            X: noise/input batch.
            Y: conditioning batch, concatenated with X along axis 1.
            momentum: batch-norm momentum.

        Returns:
            Generated image batch in (-1, 1) via tanh.
        """
        # Condition the generator by concatenating noise and conditioning.
        z = tf.concat([X, Y], 1)

        # Project to a 16x16x512 feature map.
        z = tf.matmul(z, self.W1)
        z = tf.nn.relu(z)
        z = tf.reshape(z, [-1, 16, 16, 512])

        # Two upsample + conv + BN + leaky-ReLU blocks: 16 -> 32 -> 64.
        z = UpSampling2D()(z)
        z = conv2d(z, self.W2, [1, 1, 1, 1], padding="SAME")
        z = batch_normalization(z, momentum=momentum)
        z = tf.nn.leaky_relu(z)

        z = UpSampling2D()(z)
        z = conv2d(z, self.W3, [1, 1, 1, 1], padding="SAME")
        z = batch_normalization(z, momentum=momentum)
        z = tf.nn.leaky_relu(z)

        # Final conv to the output channels, squashed to (-1, 1).
        z = conv2d(z, self.W4, [1, 1, 1, 1], padding="SAME")

        return tf.nn.tanh(z)
예제 #10
0
 def generator_fn(input_image, mode):
     """DCGAN-style generator: dense projection reshaped to 4x4, then four
     stride-2 transposed convs up to a 64x64x3 tanh image.

     Args:
         input_image: latent input tensor fed to the first dense layer.
         mode: unused here; kept for the estimator-style signature.

     Returns:
         tanh-activated image tensor.
     """
     with tf.name_scope('generator'):
         # 4*4
         dense_1 = layers.dense(inputs=input_image, units=batch_size*16)
         batch_norm_1 = layers.batch_normalization(inputs=dense_1)
         # NOTE(review): the reshape uses batch_size for the channel axis
         # too — confirm the intended feature depth really equals batch_size.
         reshape_1 = tf.reshape(batch_norm_1, shape=(batch_size, 4, 4, batch_size))
         relu_1 = tf.nn.relu(reshape_1)
         # 8*8
         conv_T_1 = layers.conv2d_transpose(inputs=relu_1, filters=64, kernel_size=(2, 2), strides=(2, 2), padding='same')
         batch_norm_2 = layers.batch_normalization(inputs=conv_T_1)
         relu_2 = tf.nn.relu(batch_norm_2)
         # 16*16
         conv_T_2 = layers.conv2d_transpose(inputs=relu_2, filters=32, kernel_size=(2, 2), strides=(2, 2), padding='same')
         batch_norm_3 = layers.batch_normalization(inputs=conv_T_2)
         relu_3 = tf.nn.relu(batch_norm_3)
         # 32*32
         conv_T_3 = layers.conv2d_transpose(inputs=relu_3, filters=16, kernel_size=(2, 2), strides=(2, 2), padding='same')
         batch_norm_4 = layers.batch_normalization(inputs=conv_T_3)
         relu_4 = tf.nn.relu(batch_norm_4)
         # 64*64
         conv_T_4 = layers.conv2d_transpose(
             inputs=relu_4, filters=3, kernel_size=(2, 2), strides=(2, 2), padding='same')
         tanh_1 = tf.nn.tanh(conv_T_4)
         print(tanh_1)
         return tanh_1
예제 #11
0
파일: DCGAN_CIFAR10.py 프로젝트: Ninei/GANs
def generator(z):
    """DCGAN generator for CIFAR-10: fully-connected projection to a
    4x4x1024 seed, three BN + ReLU transposed-conv blocks, and a final
    transposed conv to 3 channels with tanh output."""
    with tf.variable_scope("generator", reuse=tf.AUTO_REUSE):

        with tf.variable_scope("linear"):
            projected = clayers.fully_connected(z, 1024 * 4 * 4)

        with tf.variable_scope("conv1_transp"):
            # Lay the flat projection out as 4x4 feature maps.
            net = tf.reshape(projected, (-1, 4, 4, 1024))
            net = default_conv2d_transpose(net, 512)
            net = layers.batch_normalization(net)
            net = nn.relu(net)

        with tf.variable_scope("conv2_transp"):
            net = default_conv2d_transpose(net, 256)
            net = layers.batch_normalization(net)
            net = nn.relu(net)

        with tf.variable_scope("conv3_transp"):
            net = default_conv2d_transpose(net, 128)
            net = layers.batch_normalization(net)
            net = nn.relu(net)

        with tf.variable_scope("conv4_transp"):
            net = default_conv2d_transpose(net, 3)

        with tf.variable_scope("out"):
            out = tf.tanh(net)
    return out
예제 #12
0
    def _bottleneck_brick(incoming,
                          nb_filters,
                          is_training,
                          scope,
                          trainable=True):
        """ Code brick: conv --> conv .

        Two 1x1 convolutions, each followed by batch normalization and
        ReLU.

        Args:
            incoming: input tensor.
            nb_filters: filter count for both convolutions.
            is_training: forwarded to batch normalization's `training`.
            trainable: whether the batch-norm variables are trainable.
            scope: variable scope name wrapping the brick.

        Returns:
            Activated output of the second conv + BN stage.
        """
        with tf.variable_scope(scope):
            code1 = layers.conv2d(incoming,
                                  filters=nb_filters,
                                  kernel_size=1,
                                  strides=1,
                                  padding='same',
                                  kernel_initializer=he_init,
                                  bias_initializer=b_init)
            code1_bn = layers.batch_normalization(code1,
                                                  training=is_training,
                                                  trainable=trainable)
            code1_act = tf.nn.relu(code1_bn)

            code2 = layers.conv2d(code1_act,
                                  filters=nb_filters,
                                  kernel_size=1,
                                  strides=1,
                                  padding='same',
                                  kernel_initializer=he_init,
                                  bias_initializer=b_init)
            code2_bn = layers.batch_normalization(code2,
                                                  training=is_training,
                                                  trainable=trainable)
            code2_act = tf.nn.relu(code2_bn)

        return code2_act
예제 #13
0
def dense_block(x,
                iter,
                two_conv,
                one_conv,
                is_train=False,
                name='denseblock'):
    """DenseNet-style block: `iter` repetitions of BN-ReLU-1x1-conv /
    BN-ReLU-3x3-conv, each output concatenated onto the running features.

    Args:
        x: input tensor.
        iter: number of composite layers (shadows the builtin name).
        two_conv: filter count of the 3x3 convolution.
        one_conv: filter count of the 1x1 bottleneck convolution.
        is_train: training flag.
        name: variable-scope / layer-name prefix.

    Returns:
        Concatenation of the input with every composite-layer output.
    """
    with tf.variable_scope(name):
        net = x
        for i in range(iter):
            # NOTE(review): `trainable=is_train` controls whether the BN
            # variables are trainable; if switching between batch and
            # moving statistics was intended, this should probably be
            # `training=is_train` — confirm against `tl`'s API.
            x = tl.batch_normalization(net,
                                       trainable=is_train,
                                       name=name + '_bn1/' + str(i))
            x = tf.nn.relu(x)
            x = tl.conv2d(x,
                          one_conv, (1, 1),
                          padding='same',
                          name=name + '_conv1/' + str(i))
            x = tl.batch_normalization(x,
                                       trainable=is_train,
                                       name=name + '_bn2/' + str(i))
            x = tf.nn.relu(x)
            x = tl.conv2d(x,
                          two_conv, (3, 3), (1, 1),
                          padding='same',
                          name=name + '_conv2/' + str(i))
            # Dense connectivity: append this layer's output to the stack.
            net = tf.concat([x, net], -1)
        return net
예제 #14
0
def generator(noise, reuse=False, alpha=0.2, training=True):
    """DCGAN generator: dense 4x4x512 seed, three stride-2 transposed-conv
    blocks with batch norm and leaky ReLU (slope `alpha`), and a final
    transposed conv to 3 channels.

    Returns:
        (out, logits): the tanh-activated image and its pre-activation.
    """
    with tf.variable_scope('generator', reuse=reuse):

        net = dense(noise, 4 * 4 * 512)
        net = tf.reshape(net, (-1, 4, 4, 512))
        net = batch_normalization(net, training=training)
        net = tf.maximum(alpha * net, net)  # leaky ReLU

        # Three upsampling blocks: 4 -> 8 -> 16 -> 32.
        for n_filters in (256, 128, 64):
            net = conv2d_transpose(net, n_filters, 5, 2, padding='same')
            net = batch_normalization(net, training=training)
            net = tf.maximum(alpha * net, net)

        logits = conv2d_transpose(net, 3, 5, 2, padding='same')
        out = tf.tanh(logits)

        return out, logits
예제 #15
0
파일: Model.py 프로젝트: mtes4207/GAN_Model
def generator(z, batch_size, z_dim, test_train, reuse1=None):
    """Generator: three Dnn layers up to 10000 units, reshaped to
    10x10x100, then three transposed convs to a 76x76x3 sigmoid image.

    Args:
        z: latent input.
        batch_size: static batch size baked into the deconv output shapes.
        z_dim: unused in this body; kept for the caller's signature.
        test_train: training flag forwarded to Dnn and batch norm.
        reuse1: variable-scope reuse flag.

    Returns:
        Sigmoid-activated tensor of shape [batch_size, 76, 76, 3].
    """
    with tf.variable_scope("generator",reuse=reuse1):
        init = tf.truncated_normal_initializer(stddev=0.1)
        layer1 = Dnn( z, 500,"1",is_training=test_train)
        layer2 = Dnn( layer1, 1000, "2",is_training=test_train)
        layer3 = Dnn( layer2, 10000, "3",is_training=test_train)
        g = tf.reshape( layer3, [-1,10,10,100])

        # Generate 50 features
        g_w1 = tf.get_variable('g_cw1', [2, 2, 50, 100], dtype=tf.float32,
                               initializer=tf.truncated_normal_initializer(stddev=0.5))
        g_b1 = tf.get_variable('g_cb1', [50], initializer=init)
        g1 = tf.nn.conv2d_transpose(g, g_w1, [batch_size, 19, 19, 50], strides=[1, 2, 2, 1], padding='SAME')
        g1 = g1 + g_b1
        g1 = tf.nn.relu(g1)
        # NOTE(review): batch norm applied AFTER the ReLU here — unusual
        # ordering; confirm it is intentional.
        g1 = batch_normalization(g1, name='bn1', training=test_train)

        # Generate 25 features
        g_w2 = tf.get_variable('g_cw2', [2, 2, 25, 50], dtype=tf.float32,
                               initializer=tf.truncated_normal_initializer(stddev=0.5))
        g_b2 = tf.get_variable('g_cb2', [25], initializer=init)
        g2 = tf.nn.conv2d_transpose(g1, g_w2, [batch_size, 38, 38, 25], strides=[1, 2, 2, 1], padding='VALID')
        g2 = g2 + g_b2
        g2 = tf.nn.relu(g2)
        g2 = batch_normalization(g2, name='bn2', training = test_train)

        # final features
        g_w3 = tf.get_variable('g_cw3', [2, 2, 3, 25], dtype=tf.float32,
                               initializer=tf.truncated_normal_initializer(stddev=1))
        g_b3 = tf.get_variable('g_cb3', [3], initializer=init)
        g3 = tf.nn.conv2d_transpose(g2, g_w3, [batch_size, 76, 76, 3], strides=[1, 2, 2, 1], padding='VALID')
        g3 = g3 + g_b3
        g3 = tf.sigmoid(g3)
        return g3
예제 #16
0
    def forward(self, X, momentum=0.5):
        """Generator forward pass: dense projection, reshape to 8x8, three
        upsample + conv + BN + leaky-ReLU blocks (8 -> 16 -> 32 -> 64), and
        a final conv with tanh output.

        Args:
            X: latent input batch.
            momentum: batch-norm momentum.

        Returns:
            Generated image batch with values in (-1, 1).
        """
        z = tf.matmul(X,self.W1)
        z = tf.nn.leaky_relu(z)
        # Reshape to a 4-D tensor of 8x8 feature maps.
        z = tf.reshape(z,[-1,8,8,self.layer_sizes[0]])
        # 8,8

        # Upsampling (Keras) to double the image size before each conv.
        z = UpSampling2D()(z)
        z = conv2d(z,self.W2,[1,1,1,1],padding="SAME")
        z = batch_normalization(z,momentum=momentum)
        z = tf.nn.leaky_relu(z)
        # 16,16

        z = UpSampling2D()(z)
        z = conv2d(z,self.W3,[1,1,1,1],padding="SAME")
        z = batch_normalization(z,momentum=momentum)
        z = tf.nn.leaky_relu(z)
        # 32,32

        z = UpSampling2D()(z)
        z = conv2d(z,self.W4,[1,1,1,1],padding="SAME")
        z = batch_normalization(z,momentum=momentum)
        z = tf.nn.leaky_relu(z)
        # 64,64

        # Final conv to the output channels.
        z = conv2d(z,self.W5,[1,1,1,1],padding="SAME")
        # 64,64,3

        return tf.nn.tanh(z)
예제 #17
0
def discriminator(x, reuse=False, alpha=0.2, training=True):
    """
    Discriminator model, taking `x` as input.

    Four stride-2 convs with leaky ReLU (slope `alpha`); batch norm on
    all but the first. The flattened features map to a single logit.

    Args:
        x: input image batch (NHWC).
        reuse: variable-scope reuse flag.
        alpha: leaky-ReLU slope.
        training: batch-norm training flag.

    Returns:
        Logit tensor of shape (batch, 1). The sigmoid is left to the
        loss (e.g. sigmoid cross-entropy with logits).
    """

    with tf.variable_scope('discriminator', reuse=reuse):

        x = conv2d(x, 32, 5, 2, padding='same')
        x = tf.maximum(alpha * x, x)  # leaky ReLU

        x = conv2d(x, 64, 5, 2, padding='same')
        x = batch_normalization(x, training=training)
        x = tf.maximum(alpha * x, x)

        x = conv2d(x, 128, 5, 2, padding='same')
        x = batch_normalization(x, training=training)
        x = tf.maximum(alpha * x, x)

        x = conv2d(x, 256, 5, 2, padding='same')
        x = batch_normalization(x, training=training)
        x = tf.maximum(alpha * x, x)

        flatten = tf.reshape(x, (-1, 4 * 4 * 256))
        logits = dense(flatten, 1)
        # Fixed: the original computed `out = tf.sigmoid(logits)` but never
        # used or returned it; the dead op is removed. Callers still
        # receive raw logits, exactly as before.

        return logits
예제 #18
0
        def _x(ip):
            """Composite DenseNet layer applied to `ip`: BN + ReLU, an
            optional 1x1 bottleneck conv (nb_filter * 4 channels) with its
            own BN + ReLU, a 3x3 conv to nb_filter channels, and optional
            dropout. `nb_filter` and `self` are captured from the
            enclosing scope."""
            x = batch_normalization(ip, **self.bn_kwargs)
            x = tf.nn.relu(x)

            if self.bottleneck:
                # Bottleneck: widen to 4x the growth rate before the 3x3 conv.
                inter_channel = nb_filter * 4

                x = conv2d(x,
                           inter_channel, (1, 1),
                           kernel_initializer='he_normal',
                           padding='same',
                           use_bias=False,
                           **self.conv_kwargs)
                x = batch_normalization(x, **self.bn_kwargs)
                x = tf.nn.relu(x)

            x = conv2d(x,
                       nb_filter, (3, 3),
                       kernel_initializer='he_normal',
                       padding='same',
                       use_bias=False,
                       **self.conv_kwargs)

            if self.dropout_rate:
                x = dropout(x, self.dropout_rate, training=self.training)

            return x
예제 #19
0
파일: train.py 프로젝트: fendaq/infoGAN-tf
def discriminatorNet(inputs, is_training, use_batchNorm):
    """InfoGAN discriminator trunk: two stride-2 convs (optionally batch
    normalized), flatten, then a 1024-unit leaky-ReLU dense layer.

    Args:
        inputs: input image tensor.
        is_training: forwarded to batch normalization.
        use_batchNorm: whether to insert BN after each conv.

    Returns:
        1024-dim feature tensor (no classification head here).
    """
    layer_id = 0
    net = layers.conv2d(inputs,
                        64,
                        kernel_size=4,
                        strides=2,
                        padding="SAME",
                        name="conv_%d" % layer_id)
    if use_batchNorm:
        net = layers.batch_normalization(net,
                                         training=is_training,
                                         name="bn_%d" % layer_id)
    net = tf.nn.leaky_relu(net, alpha=0.01, name="lrelu_%d" % layer_id)

    layer_id += 1
    net = layers.conv2d(net,
                        128,
                        kernel_size=4,
                        strides=2,
                        padding="SAME",
                        name="conv_%d" % layer_id)
    if use_batchNorm:
        net = layers.batch_normalization(net,
                                         training=is_training,
                                         name="bn_%d" % layer_id)
    net = tf.nn.leaky_relu(net, alpha=0.01, name="lrelu_%d" % layer_id)

    layer_id += 1
    net = layers.flatten(net)
    net = layers.dense(net, 1024, name="dense_%d" % layer_id)
    return tf.nn.leaky_relu(net, alpha=0.01, name="lrelu_%d" % layer_id)
예제 #20
0
def generator(noise, reuse=False, alpha=0.2, training=True):
    """DCGAN generator taking `noise` as input.

    Dense 4x4x512 seed, three stride-2 transposed-conv blocks with batch
    norm and leaky ReLU (slope `alpha`), and a final transposed conv to
    3 channels with tanh output.
    """

    with tf.variable_scope('generator', reuse=reuse):

        net = dense(noise, 4 * 4 * 512)
        net = tf.reshape(net, (-1, 4, 4, 512))
        net = batch_normalization(net, training=training)
        net = tf.maximum(alpha * net, net)  # leaky ReLU

        # Three upsampling blocks: 4 -> 8 -> 16 -> 32.
        for n_filters in (256, 128, 64):
            net = conv2d_transpose(net, n_filters, 5, 2, padding='same')
            net = batch_normalization(net, training=training)
            net = tf.maximum(alpha * net, net)

        logits = conv2d_transpose(net, 3, 5, 2, padding='same')
        out = tf.tanh(logits)

        return out
예제 #21
0
def f_net_ddp(inputs, action_shape, is_training):
    """DDPG-style actor network: batch-normalized state through two hidden
    layers (400, 300), tanh output scaled by 2 and reshaped to the action
    shape.

    Args:
        inputs: state tensor; flattened to (batch, depth).
        action_shape: iterable of action dimensions; their product is the
            output width.
        is_training: forwarded to batch normalization.

    Returns:
        Action tensor of shape [-1] + action_shape with values in [-2, 2].
    """
    depth_state = inputs.get_shape()[1:].num_elements()
    inputs = tf.reshape(inputs, shape=[-1, depth_state], name='inputs')
    inputs = layers.batch_normalization(inputs, axis=1, training=is_training)
    hidden1 = layers.dense(
        inputs=inputs, units=400,
        activation=None,
        kernel_regularizer=l2_regularizer(scale=1e-2),
        trainable=True, name='hidden1',
    )
    # First hidden layer: batch norm before the ReLU.
    hidden1 = tf.nn.relu(layers.batch_normalization(hidden1, axis=1, training=is_training))
    hidden2 = layers.dense(
        inputs=hidden1, units=300,
        activation=None,
        kernel_regularizer=l2_regularizer(scale=1e-2),
        trainable=True, name='hidden2',
    )
    # Second hidden layer: plain ReLU, no batch norm.
    hidden2 = tf.nn.relu(hidden2)
    # Total number of action elements.
    depth_action = reduce(lambda x, y: x*y, action_shape, 1)
    action = layers.dense(
        inputs=hidden2, units=depth_action,
        activation=tf.nn.tanh,
        # Small uniform init keeps initial outputs near zero.
        kernel_initializer=tf.random_uniform_initializer(minval=-3e-3, maxval=3e-3),
        kernel_regularizer=l2_regularizer(scale=1e-2),
        trainable=True, name='out'
    )
    # Scale tanh output from [-1, 1] to [-2, 2] — presumably the
    # environment's action bound; confirm against the env spec.
    action = tf.reshape(2.0*action, shape=[-1]+list(action_shape), name='out')

    return action
예제 #22
0
파일: DCGAN_CIFAR10.py 프로젝트: Ninei/GANs
def discriminator(x):
    """DCGAN discriminator for CIFAR-10: four conv blocks with leaky ReLU
    (batch norm on blocks 2-4), flattened to a single sigmoid output.

    Args:
        x: input image batch.

    Returns:
        Per-sample sigmoid probability tensor.
    """
    with tf.variable_scope("discriminator", reuse=tf.AUTO_REUSE):
        with tf.variable_scope("conv1"):
            conv1 = default_conv2d(x, 128)
            conv1 = nn.leaky_relu(conv1, alpha=0.2)

        with tf.variable_scope("conv2"):
            conv2 = default_conv2d(conv1, 256)
            conv2 = layers.batch_normalization(conv2)
            conv2 = nn.leaky_relu(conv2, alpha=0.2)

        with tf.variable_scope("conv3"):
            conv3 = default_conv2d(conv2, 512)
            conv3 = layers.batch_normalization(conv3)
            conv3 = nn.leaky_relu(conv3, alpha=0.2)

        with tf.variable_scope("conv4"):
            # BUGFIX: the original normalized and activated conv3 here
            # (copy-paste error), so the 1024-filter conv's output was
            # discarded. Thread conv4 through its own BN and activation.
            conv4 = default_conv2d(conv3, 1024)
            conv4 = layers.batch_normalization(conv4)
            conv4 = nn.leaky_relu(conv4, alpha=0.2)

        with tf.variable_scope("linear"):
            linear = clayers.flatten(conv4)
            linear = clayers.fully_connected(linear, 1)

        with tf.variable_scope("out"):
            out = nn.sigmoid(linear)
    return out
예제 #23
0
    def forward2(self, X, momentum=0.5):
        """Discriminator forward pass with a manual matmul head.

        Args:
            X: input image batch; the size comments below assume 28x28
               input (down to 7x7x256 before the dense head) — confirm.
            momentum: batch-norm momentum.

        Returns:
            Per-sample logits; the activation is applied in the cost
            function.
        """
        # 1st layer
        z = conv2d(X,self.W1,[1,2,2,1],padding="SAME")  #Size 14,14,64
        # add bias
        z = tf.nn.bias_add(z,self.b1)
        # activation function
        z = tf.nn.leaky_relu(z)

        # 2nd layer
        z = conv2d(z,self.W2,[1,1,1,1],padding="SAME")  #Size 14,14,64
        z = tf.nn.bias_add(z,self.b2)
        z = batch_normalization(z, momentum=momentum)
        z = tf.nn.leaky_relu(z)

        # 3rd layer
        z = conv2d(z,self.W3,[1,2,2,1],padding="SAME")  #Size 7,7,128
        z = tf.nn.bias_add(z,self.b3)
        z = batch_normalization(z, momentum=momentum)
        z = tf.nn.leaky_relu(z)

        # 4th layer
        z = conv2d(z,self.W4,[1,1,1,1],padding="SAME")  #Size 7,7,256
        z = tf.nn.bias_add(z,self.b4)
        z = batch_normalization(z, momentum=momentum)
        z = tf.nn.leaky_relu(z)

        # Fully connected layer: flatten, then project to logits.
        z = tf.reshape(z,[-1, 7*7*256])
        logits = tf.matmul(z, self.W5)
        logits = tf.nn.bias_add(logits, self.b5)
        return logits   # Activation function included in cost function
예제 #24
0
 def res_block(self, x, num_filter, is_train, name):
     """Residual block with reflection padding: two pad + conv + BN stages
     added back onto the input.

     Args:
         x: input tensor (NHWC); channels must match num_filter for the add.
         num_filter: conv filter count.
         is_train: batch-norm training flag.
         name: variable scope name; sub-layer names are suffixed onto it.

     Returns:
         x + F(x). Note there is no activation inside the block.
     """
     with tf.variable_scope(name, reuse=self.reuse):
         # Reflect-pad by 1 pixel so the 3x3 conv preserves spatial size.
         y = tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]], "REFLECT")
         y = layer.conv2d(y, num_filter, 3, 1, name='_res1')
         y = layer.batch_normalization(y, center=True, scale=True, training=is_train, name='_b1')
         y = tf.pad(y, [[0, 0], [1, 1], [1, 1], [0, 0]], "REFLECT")
         y = layer.conv2d(y, num_filter, 3, 1, name='_res2')
         y = layer.batch_normalization(y, center=True, scale=True, training=is_train, name='_b2')
     return x + y
예제 #25
0
def _build_discriminator(input_data, reuse_variables=False, name='discriminator'):
    """Discriminator: four stride-2 ReLU convs (batch norm after the first
    two), flattened to a single linear logit.

    Args:
        input_data: input image batch.
        reuse_variables: variable-scope reuse flag.
        name: variable scope name.

    Returns:
        Unactivated logit tensor of shape (batch, 1).
    """
    with tf.variable_scope(name, reuse=reuse_variables):
        net = layers.conv2d(input_data, 16, [3, 3], strides=[2, 2], activation=tf.nn.relu, padding='same', name='conv2d_1')
        # NOTE(review): training=True is hard-coded, so BN always uses
        # batch statistics, even at inference — confirm this is intended.
        net = layers.batch_normalization(net, momentum=0.9, training=True)
        net = layers.conv2d(net, 32, [3, 3], strides=[2, 2], activation=tf.nn.relu, padding='same', name='conv2d_2')
        net = layers.batch_normalization(net, momentum=0.9, training=True)
        net = layers.conv2d(net, 64, [3, 3], strides=[2, 2], activation=tf.nn.relu, padding='same', name='conv2d_3')
        net = layers.conv2d(net, 128, [3, 3], strides=[2, 2], activation=tf.nn.relu, padding='same', name='conv2d_4')
        net = contrib_layers.flatten(net)
        net = layers.dense(net, 1)
    return net
예제 #26
0
def create_resnet_block(x, filters, training, activation=tf.nn.elu, skip_connection=False):
    """ResNet-style block: conv-BN-act, conv, optional residual add, then
    a final BN-act.

    Args:
        x: input tensor; its channel count must match `filters` for the
            residual add to be valid.
        filters: conv filter count.
        training: batch-norm training flag.
        activation: nonlinearity applied after each batch norm.
        skip_connection: when True, the residual add is SKIPPED — the
            input is added back only when this flag is False (note the
            inverted-sounding name).

    Returns:
        Activated output tensor.
    """
    h = layers.conv2d(x, filters, kernel_size=3, activation=None, padding="SAME", kernel_initializer=xavier_initializer(), bias_initializer=None)
    h = layers.batch_normalization(h, momentum=0.9, scale=True, fused=True, training=training)
    h = activation(h)
    h = layers.conv2d(h, filters, kernel_size=3, activation=None, padding="SAME", kernel_initializer=xavier_initializer(), bias_initializer=None)

    # Residual add only when the connection is not skipped.
    if not skip_connection:
        h = h+x

    h = layers.batch_normalization(h, momentum=0.9, scale=True, fused=True, training=training)
    h = activation(h)

    return h
예제 #27
0
def generator_net(inputs, scope, reuse=None, rgb=False):
	"""Two-branch generator: a residual color-reconstruction branch plus
	a pooled/deconvolved feature-extraction branch, concatenated and
	fused by a final conv.

	Args:
		inputs: input image tensor.
		scope: variable scope name.
		reuse: variable-scope reuse flag.
		rgb: output 3 channels when True, otherwise 1.

	Returns:
		Reconstructed image tensor (no output activation).
	"""
	output_channels = 3 if rgb else 1
	
	with tf.variable_scope(scope, reuse=reuse):
	
		# branch 1 (color reconstruction)
		
		cv1   = conv2d(inputs, filters=16, kernel_size=3, strides=1, padding='same', activation=None, name='conv9_i')
		cv1_r = leaky_relu(cv1)
		
		# First residual unit: conv-BN-lrelu, conv-BN, added onto cv1.
		res1_c = conv2d(cv1_r, filters=16, kernel_size=5, strides=1, padding='same', activation=None, name='conv3a_1')
		res1_b = batch_normalization(res1_c)
		res1_r = leaky_relu(res1_b)
		
		res1_d = conv2d(res1_r, filters=16, kernel_size=3, strides=1, padding='same', activation=None, name='conv3b_1')
		res1   = batch_normalization(res1_d)
		
		sum1  = cv1 + res1
		
		# Second residual unit, added onto sum1.
		res2_c = conv2d(sum1, filters=16, kernel_size=3, strides=1, padding='same', activation=None, name='conv3a_2')
		res2_b = batch_normalization(res2_c)
		res2_r = leaky_relu(res2_b)
		
		res2_d = conv2d(res2_r, filters=16, kernel_size=3, strides=1, padding='same', activation=None, name='conv3b_2')
		res2   = batch_normalization(res2_d)
		
		br1 = sum1 + res2
		
		
		# branch 2 (features extraction): three conv + maxpool stages down
		# to 1/8 resolution, then three stride-2 deconvs back up.
		br2 = conv2d(inputs, filters=16, kernel_size=5, strides=1, padding='same', activation=tf.nn.leaky_relu, name='conv_bf1')
		br2 = max_pooling2d(br2, pool_size=2, strides=2, name='maxpool1')
		br2 = conv2d(br2, filters=16, kernel_size=3, strides=1, padding='same', activation=tf.nn.leaky_relu, name='conv_bf2')
		br2 = max_pooling2d(br2, pool_size=2, strides=2, name='maxpool2a')
		br2 = conv2d(br2, filters=16, kernel_size=3, strides=1, padding='same', activation=tf.nn.leaky_relu, name='conv_bf3')
		br2 = max_pooling2d(br2, pool_size=2, strides=2, name='maxpool2')
		
		# Debug shape prints left in place (remove once shapes are verified).
		print(br2.shape)
		br2 = conv2d_transpose(br2, filters=16, kernel_size=3, padding='same', strides=2, activation=tf.nn.leaky_relu, name="deconv_1")
		print(br2.shape)
		br2 = conv2d_transpose(br2, filters=16, kernel_size=3, padding='same', strides=2, activation=tf.nn.leaky_relu, name="deconv_2")
		print(br2.shape)
		br2 = conv2d_transpose(br2, filters=16, kernel_size=3, padding='same', strides=2, activation=tf.nn.leaky_relu, name="deconv_3")
		print(br2.shape)
		
		# concatenate branches and reconstruct image
		sum3 = tf.concat((br1, br2), axis=3);
		model = conv2d(sum3, filters=output_channels, kernel_size=3, strides=1, padding='same', activation=None, name='conv9_f')
		
		return model
예제 #28
0
def misconception_fishing(inputs,
                          filters_list,
                          kernel_size,
                          strides_list,
                          objective_function,
                          training,
                          pre_filters=128,
                          post_filters=128,
                          post_layers=1,
                          dropout_rate=0.5,
                          internal_dropout_rate=0.5,
                          other_objectives=(),
                          feature_means=None,
                          feature_stds=None):
    """Fishing-localization head on top of a misconception trunk.

    Runs misconception_model, projects each intermediate layer to
    `pre_filters` channels with a 1x1 conv + BN + ReLU, expands layer i by
    a factor of 2**i via repeat_tensor so all layers align, and sums them
    into a single embedding. The embedding passes through `post_layers`
    1x1 conv layers and dropout, then a final 1-channel 1x1 conv whose
    output is handed to `objective_function.build`.

    Args:
        inputs: input feature tensor for the trunk.
        filters_list, kernel_size, strides_list: trunk hyperparameters,
            forwarded to misconception_model.
        objective_function: object whose `build` wraps the raw outputs.
        training: training flag for batch norm and dropout.
        pre_filters: channels of the per-layer 1x1 projections.
        post_filters: channels of the post-sum conv stack.
        post_layers: number of post-sum conv layers; the last one always
            uses ReLU and is followed by dropout.
        dropout_rate: dropout on the final embedding.
        internal_dropout_rate: dropout inside the trunk.
        other_objectives: extra objectives forwarded to the trunk.
        feature_means, feature_stds: optional input normalization stats
            forwarded to the trunk.

    Returns:
        Whatever objective_function.build returns for the raw outputs.
    """

    _, layers = misconception_model(inputs,
                                    filters_list,
                                    kernel_size,
                                    strides_list,
                                    training,
                                    other_objectives,
                                    sub_filters=post_filters,
                                    sub_layers=2,
                                    dropout_rate=internal_dropout_rate,
                                    feature_means=feature_means,
                                    feature_stds=feature_stds)

    # Project each trunk layer to a common width and expand it so every
    # layer has the same temporal length before summation.
    expanded_layers = []
    for i, lyr in enumerate(layers):
        lyr = ly.conv1d(lyr, pre_filters, 1, activation=None)
        lyr = ly.batch_normalization(lyr, training=training)
        lyr = tf.nn.relu(lyr)
        expanded_layers.append(repeat_tensor(lyr, 2**i))

    embedding = tf.add_n(expanded_layers)

    # Optional extra conv layers (BN + ReLU, no bias) before the head.
    for _ in range(post_layers - 1):
        embedding = ly.conv1d(embedding,
                              post_filters,
                              1,
                              activation=None,
                              use_bias=False)
        embedding = ly.batch_normalization(embedding, training=training)
        embedding = tf.nn.relu(embedding)

    embedding = ly.conv1d(embedding, post_filters, 1, activation=tf.nn.relu)
    embedding = ly.dropout(embedding, training=training, rate=dropout_rate)

    # One raw output channel per timestep; the objective interprets it.
    fishing_outputs = ly.conv1d(embedding, 1, 1, activation=None)

    return objective_function.build(fishing_outputs)
예제 #29
0
 def batch_norm_layer(self, x, train_phase, scope_bn):
     """Batch norm that switches between train and inference behavior.

     Builds two batch-norm ops under the same name — the second with
     reuse=True so they share variables — and selects between them with
     tf.cond on `train_phase`.

     Args:
         x: input tensor.
         train_phase: boolean tensor choosing the training branch.
         scope_bn: shared name for both batch-norm ops.

     Returns:
         The batch-normalized tensor.
     """
     # Training branch: normalize with the current batch statistics.
     bn_train = batch_normalization(x,
                                    center=True,
                                    scale=True,
                                    training=True,
                                    name=scope_bn)
     # Inference branch: same variables (reuse=True), moving statistics.
     bn_inference = batch_normalization(x,
                                        center=True,
                                        scale=True,
                                        training=False,
                                        reuse=True,
                                        name=scope_bn)
     bn = tf.cond(train_phase, lambda: bn_train, lambda: bn_inference)
     return bn
예제 #30
0
def _build_generator(input_data, name='generator'):
    """Generator: dense 4x4x8 seed, then five transposed convs (sizes per
    the inline comments) up to a single-channel 28x28 output.

    Args:
        input_data: latent input batch.
        name: variable scope name.

    Returns:
        Generated image tensor (one channel per sample).
    """
    with tf.variable_scope(name):
        net = layers.dense(input_data, 128)
        net = tf.nn.relu(net)
        net = tf.reshape(net, [-1, 4, 4, 8])
        net = layers.conv2d_transpose(net, 128, [5, 5], activation=tf.nn.relu, strides=[2, 2], padding='same')  # 8x8
        # NOTE(review): training=True is hard-coded on every BN — batch
        # statistics are used even at inference; confirm intent.
        net = layers.batch_normalization(net, momentum=0.9, training=True)
        net = layers.conv2d_transpose(net, 64, [5, 5], activation=tf.nn.relu, strides=[2, 2])  # 19x19
        net = layers.batch_normalization(net, momentum=0.9, training=True)
        net = layers.conv2d_transpose(net, 32, [5, 5], activation=tf.nn.relu)  # 23x23
        net = layers.batch_normalization(net, momentum=0.9, training=True)
        net = layers.conv2d_transpose(net, 16, [5, 5], activation=tf.nn.relu)  # 27x27
        net = layers.batch_normalization(net, momentum=0.9, training=True)
        # NOTE(review): the output layer uses ReLU, so pixels are
        # non-negative and unbounded — tanh/sigmoid is more usual; confirm.
        net = layers.conv2d_transpose(net, 1, [2, 2], activation=tf.nn.relu)  # 28x28
    return net