Example #1
def discriminator(x):
    with tf.variable_scope("discriminator", reuse=tf.AUTO_REUSE):
        with tf.variable_scope("conv1"):
            conv1 = default_conv2d(x, 128)
            conv1 = nn.leaky_relu(conv1, alpha=0.2)

        with tf.variable_scope("conv2"):
            conv2 = default_conv2d(conv1, 256)
            conv2 = layers.batch_normalization(conv2)
            conv2 = nn.leaky_relu(conv2, alpha=0.2)

        with tf.variable_scope("conv3"):
            conv3 = default_conv2d(conv2, 512)
            conv3 = layers.batch_normalization(conv3)
            conv3 = nn.leaky_relu(conv3, alpha=0.2)

        with tf.variable_scope("conv4"):
            conv4 = default_conv2d(conv3, 1024)
            conv4 = layers.batch_normalization(conv4)
            conv4 = nn.leaky_relu(conv4, alpha=0.2)

        with tf.variable_scope("linear"):
            linear = clayers.flatten(conv4)
            linear = clayers.fully_connected(linear, 1)

        with tf.variable_scope("out"):
            out = nn.sigmoid(linear)
    return out
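A minimal usage sketch for the discriminator above, assuming TF 1.x with nn = tf.nn, layers = tf.layers, clayers = tf.contrib.layers, and a default_conv2d helper already in scope; the image shape and placeholder names are illustrative:

import tensorflow as tf

real_images = tf.placeholder(tf.float32, [None, 64, 64, 3])
fake_images = tf.placeholder(tf.float32, [None, 64, 64, 3])

# tf.AUTO_REUSE in the scope above lets both calls share one set of weights.
d_real = discriminator(real_images)
d_fake = discriminator(fake_images)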
Example #2
def generator_net(inputs, scope, reuse=None, rgb=False):
	
	output_channels = 3 if rgb else 1
	
	with tf.variable_scope(scope, reuse=reuse):
	
		# branch 1 (color reconstruction)
		
		cv1   = conv2d(inputs, filters=16, kernel_size=3, strides=1, padding='same', activation=None, name='conv9_i')
		cv1_r = leaky_relu(cv1)
		
		res1_c = conv2d(cv1_r, filters=16, kernel_size=5, strides=1, padding='same', activation=None, name='conv3a_1')
		res1_b = batch_normalization(res1_c)
		res1_r = leaky_relu(res1_b)
		
		res1_d = conv2d(res1_r, filters=16, kernel_size=3, strides=1, padding='same', activation=None, name='conv3b_1')
		res1   = batch_normalization(res1_d)
		
		sum1  = cv1 + res1
		
		res2_c = conv2d(sum1, filters=16, kernel_size=3, strides=1, padding='same', activation=None, name='conv3a_2')
		res2_b = batch_normalization(res2_c)
		res2_r = leaky_relu(res2_b)
		
		res2_d = conv2d(res2_r, filters=16, kernel_size=3, strides=1, padding='same', activation=None, name='conv3b_2')
		res2   = batch_normalization(res2_d)
		
		br1 = sum1 + res2
		
		
		# branch 2 (feature extraction)
		br2 = conv2d(inputs, filters=16, kernel_size=5, strides=1, padding='same', activation=tf.nn.leaky_relu, name='conv_bf1')
		br2 = max_pooling2d(br2, pool_size=2, strides=2, name='maxpool1')
		br2 = conv2d(br2, filters=16, kernel_size=3, strides=1, padding='same', activation=tf.nn.leaky_relu, name='conv_bf2')
		br2 = max_pooling2d(br2, pool_size=2, strides=2, name='maxpool2a')
		br2 = conv2d(br2, filters=16, kernel_size=3, strides=1, padding='same', activation=tf.nn.leaky_relu, name='conv_bf3')
		br2 = max_pooling2d(br2, pool_size=2, strides=2, name='maxpool2')
		
		br2 = conv2d_transpose(br2, filters=16, kernel_size=3, padding='same', strides=2, activation=tf.nn.leaky_relu, name="deconv_1")
		br2 = conv2d_transpose(br2, filters=16, kernel_size=3, padding='same', strides=2, activation=tf.nn.leaky_relu, name="deconv_2")
		br2 = conv2d_transpose(br2, filters=16, kernel_size=3, padding='same', strides=2, activation=tf.nn.leaky_relu, name="deconv_3")
		
		# concatenate branches and reconstruct image
		sum3 = tf.concat((br1, br2), axis=3)
		model = conv2d(sum3, filters=output_channels, kernel_size=3, strides=1, padding='same', activation=None, name='conv9_f')
		
		return model
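A usage sketch for generator_net, assuming TF 1.x with conv2d, batch_normalization, max_pooling2d, and conv2d_transpose aliasing the tf.layers functions and leaky_relu aliasing tf.nn.leaky_relu; the input shape is illustrative:

import tensorflow as tf

conv2d = tf.layers.conv2d
batch_normalization = tf.layers.batch_normalization
max_pooling2d = tf.layers.max_pooling2d
conv2d_transpose = tf.layers.conv2d_transpose
leaky_relu = tf.nn.leaky_relu

# Height/width should be divisible by 8 so branch 2's three stride-2
# poolings and deconvolutions restore the input's spatial size.
inputs = tf.placeholder(tf.float32, [None, 64, 64, 1])
outputs = generator_net(inputs, scope='generator', rgb=False)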
Example #3
def conv_layer(x,
               filtershape,
               stride,
               name,
               LReLU=True,
               BN=True,
               IS_Training=True):
    with tf.variable_scope(name):
        filters = tf.get_variable(name='weight',
                                  shape=filtershape,
                                  dtype=tf.float32,
                                  initializer=tf.random_normal_initializer(
                                      mean=0, stddev=0.2),
                                  trainable=True)
        conv = tf.nn.conv2d(x, filters, [1, stride, stride, 1], padding='SAME')
        conv_biases = tf.Variable(tf.constant(0.1,
                                              shape=[filtershape[3]],
                                              dtype=tf.float32),
                                  trainable=True,
                                  name='bias')
        bias = tf.nn.bias_add(conv, conv_biases)
        output = bias
        if BN:
            output = bn(output, IS_Training, name + '_bn')
        if LReLU:
            output = nn.leaky_relu(output)

        return output
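A usage sketch for conv_layer, assuming import tensorflow as tf, nn = tf.nn, and a bn(inputs, is_training, name) batch-norm helper in scope; filtershape follows tf.nn.conv2d's [height, width, in_channels, out_channels] convention:

x = tf.placeholder(tf.float32, [None, 32, 32, 3])
# 3x3 kernels mapping 3 input channels to 64 output channels, stride 1
h = conv_layer(x, filtershape=[3, 3, 3, 64], stride=1, name='conv1',
               LReLU=True, BN=True, IS_Training=True)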
Example #4
def ACT(inputs, act_fn):
    if act_fn == 'relu':
        act = relu(inputs)
    elif act_fn == 'lrelu':
        act = leaky_relu(inputs)
    elif act_fn == 'sigmoid':
        act = sigmoid(inputs)
    else:
        act = inputs  # fall through unchanged so `act` is always defined
    return act
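A quick usage sketch, assuming relu, leaky_relu, and sigmoid alias tf.nn.relu, tf.nn.leaky_relu, and tf.nn.sigmoid:

import tensorflow as tf

relu, leaky_relu, sigmoid = tf.nn.relu, tf.nn.leaky_relu, tf.nn.sigmoid

x = tf.constant([-1.0, 0.0, 2.0])
h = ACT(x, 'lrelu')  # leaky ReLU with default alpha=0.2 -> [-0.2, 0.0, 2.0]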
Example #5
def leaky_relu(inputs, alpha=0.2):
    """
    Leaky relu activation

    Parameters
    ----------
    inputs: Input tensor
    alpha: Slope of negative neurons
    """
    return nn.leaky_relu(inputs, alpha=alpha)
Example #6
def convBatchNorm(num,
                  x,
                  num_filters,
                  filter_size=5,
                  strides=(2, 2),
                  training=True):
    with tf.variable_scope('conv-batch-norm-{}'.format(num)):
        x = conv2d(x, num_filters, filter_size, strides=strides)
        x = batch_norm(x, training=training)
        x = leaky_relu(x)
        return x
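Stacked, this block yields a DCGAN-style downsampling path; a sketch, assuming conv2d wraps tf.layers.conv2d with padding='same', batch_norm wraps tf.layers.batch_normalization, and leaky_relu is tf.nn.leaky_relu:

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 64, 64, 3])
x = convBatchNorm(1, x, 64)    # stride 2 halves H and W -> [None, 32, 32, 64]
x = convBatchNorm(2, x, 128)   # -> [None, 16, 16, 128]
x = convBatchNorm(3, x, 256)   # -> [None, 8, 8, 256]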
Example #7
def __init__(self, images, params, reuse=False):
    with tf.variable_scope("RGBNetwork", reuse=reuse) as scope:
        x = tf.reshape(images, [-1, 200, 200, 3])
        x = conv2d(x, 64, (10, 10), strides=(4, 4), padding='same')
        x = leaky_relu(x)
        x = max_pooling2d(x, (5, 5), strides=(2, 2), padding='same')
        x = conv2d(x, 128, (2, 2), strides=(2, 2), padding='same')
        x = leaky_relu(x)
        x = max_pooling2d(x, (2, 2), strides=(2, 2), padding='same')
        x = conv2d(x, 256, (3, 3), strides=(1, 1), padding='same')
        x = leaky_relu(x)
        x = conv2d(x, 512, (3, 3), strides=(1, 1), padding='same')
        x = leaky_relu(x)
        x = flatten(x)
        x = dense(x, 256, activation=leaky_relu)
        x = dense(x, 200 * 200, activation=None)
        x = tf.reshape(x, [-1, 200, 200])
        self.output = tf.cast(x, tf.float32)
    # Collect this network's variables; the scope name must match the
    # variable_scope above ("RGBNetwork", not "SimpleNetwork").
    self.parameters = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                        scope="RGBNetwork")
Example #8
def ACT(inputs, act_fn):
    if act_fn == 'relu':
        act = relu(inputs)
    elif act_fn == 'lrelu':
        act = leaky_relu(inputs)
    elif act_fn == 'sigmoid':
        act = sigmoid(inputs)
    elif act_fn == 'tanh':
        act = tanh(inputs)
    else:
        act = inputs
    return act
Example #9
def convolutional_autoencoder_net(inputs, scope, reuse=None, rgb=False):
	
	output_channels = 3 if rgb else 1
	
	with tf.variable_scope(scope, reuse=reuse):
		
		cv1   = conv2d(inputs, filters=32, kernel_size=3, strides=1, padding='same', activation=None, name='conv9_i')
		cv1_r = leaky_relu(cv1)
		
		res1_c = conv2d(cv1_r, filters=32, kernel_size=5, strides=1, padding='same', activation=None, name='conv3a_1')
		res1_b = batch_normalization(res1_c)
		res1_r = leaky_relu(res1_b)
		
		res1_d = conv2d(res1_r, filters=32, kernel_size=3, strides=1, padding='same', activation=None, name='conv3b_1')
		res1   = batch_normalization(res1_d)
		
		sum1  = cv1 + res1
		
		res2_c = conv2d(sum1, filters=32, kernel_size=3, strides=1, padding='same', activation=None, name='conv3a_2')
		res2_b = batch_normalization(res2_c)
		res2_r = leaky_relu(res2_b)
		
		res2_d = conv2d(res2_r, filters=32, kernel_size=5, strides=1, padding='same', activation=None, name='conv3b_2')
		res2   = batch_normalization(res2_d)
		
		sum2 = sum1 + res2
		
		res3_c = conv2d(sum2, filters=32, kernel_size=3, strides=1, padding='same', activation=None, name='conv3a_3')
		res3_b = batch_normalization(res3_c)
		res3_r = leaky_relu(res3_b)
		
		res3_d = conv2d(res3_r, filters=32, kernel_size=3, strides=1, padding='same', activation=None, name='conv3b_3')
		res3   = batch_normalization(res3_d)
		
		sum3 = sum2 + res3

		model = conv2d(sum3, filters=output_channels, kernel_size=3, strides=1, padding='same', activation=None, name='conv9_f')
		
		return model
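Because the whole autoencoder lives in a single variable scope, a second call can share weights by passing reuse=True; a sketch with illustrative placeholder shapes:

import tensorflow as tf

train_batch = tf.placeholder(tf.float32, [None, 64, 64, 1])
test_batch = tf.placeholder(tf.float32, [None, 64, 64, 1])

train_out = convolutional_autoencoder_net(train_batch, scope='cae')
# reuse=True reuses the weights created by the first call.
test_out = convolutional_autoencoder_net(test_batch, scope='cae', reuse=True)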
Example #10
def predict(self, inputTensor, _):
    result = nn.leaky_relu(inputTensor, self.alpha)
    return result
Example #11
def relu(x):
    # Note: despite the name, this applies a leaky ReLU (default alpha=0.2).
    return nn.leaky_relu(x)
Example #12
import tensorflow as tf

nn = tf.nn


def _silu(x):
    """(nn.silu())"""
    return x * tf.sigmoid(x)


# _relu/_leaky_relu/_sigmoid/_tanh below are reconstructed to match the demo
# calls and the expected outputs shown in the comments.
def _relu(x):
    """(nn.relu())"""
    return tf.maximum(x, 0.)


def _leaky_relu(x, alpha=0.2):
    """(nn.leaky_relu())"""
    return tf.maximum(alpha * x, x)


def _sigmoid(x):
    """(nn.sigmoid())"""
    return 1. / (1. + tf.exp(-x))


def _tanh(x):
    """(nn.tanh())"""
    return (tf.exp(x) - tf.exp(-x)) / (tf.exp(x) + tf.exp(-x))


x = tf.constant([[-1., 0, 1], [0, 0, 1]])
print(_relu(x))
print(nn.relu(x))
# tf.Tensor(
# [[0. 0. 1.]
#  [0. 0. 1.]], shape=(2, 3), dtype=float32)
# tf.Tensor(
# [[0. 0. 1.]
#  [0. 0. 1.]], shape=(2, 3), dtype=float32)
print(_leaky_relu(x))
print(nn.leaky_relu(x))
# tf.Tensor(
# [[-0.2  0.   1. ]
#  [ 0.   0.   1. ]], shape=(2, 3), dtype=float32)
# tf.Tensor(
# [[-0.2  0.   1. ]
#  [ 0.   0.   1. ]], shape=(2, 3), dtype=float32)
print(_sigmoid(x))
print(nn.sigmoid(x))
# tf.Tensor(
# [[0.26894143 0.5        0.73105854]
#  [0.5        0.5        0.73105854]], shape=(2, 3), dtype=float32)
# tf.Tensor(
# [[0.26894143 0.5        0.73105854]
#  [0.5        0.5        0.7310586 ]], shape=(2, 3), dtype=float32)
print(_tanh(x))
Example #13
def _GuidedReluGrad(op, grad):
    # Guided backprop: block the gradient wherever the incoming grad is
    # non-positive. Note that op.outputs[0] (the op's forward output) is
    # passed as nn.leaky_relu's `alpha` argument here, which is unusual.
    return tf.where(0. < grad, nn.leaky_relu(grad, op.outputs[0]),
                    tf.zeros_like(grad))
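A gradient function like the one above only takes effect once registered and mapped onto ops; a minimal TF 1.x sketch, where the op-type name "GuidedRelu" and the graph below are illustrative:

import tensorflow as tf

tf.RegisterGradient("GuidedRelu")(_GuidedReluGrad)  # register the function above

g = tf.get_default_graph()
with g.gradient_override_map({'Relu': 'GuidedRelu'}):
    x = tf.placeholder(tf.float32, [None, 8])
    y = tf.nn.relu(x)
grads = tf.gradients(y, x)[0]  # now routed through _GuidedReluGrad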