Example 1
File: ResNet.py Project: iiharu/NN
 def conv(self, inputs, filters, kernel_size, strides=1):
     inputs = batch_normalization()(inputs)
     inputs = relu()(inputs)
     inputs = conv2d(filters=filters,
                     kernel_size=kernel_size,
                     strides=strides)(inputs)
     return inputs
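
Note: the lowercase batch_normalization()/relu()/conv2d() helpers above are project-specific wrappers. A minimal sketch of the same pre-activation ordering (BN, then ReLU, then convolution) written directly against tf.keras, assuming the wrappers behave like the standard layers (defaults such as padding may differ in the original):

from tensorflow import keras

def pre_activation_conv(inputs, filters, kernel_size, strides=1):
    # Pre-activation ordering: normalize, apply the non-linearity, then convolve.
    x = keras.layers.BatchNormalization()(inputs)
    x = keras.layers.ReLU()(x)
    return keras.layers.Conv2D(filters=filters,
                               kernel_size=kernel_size,
                               strides=strides)(x)
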
Example 2
def separable_conv_relu_block(inputs, filters, kernel_size=(3, 3), strides=1):
    inputs = separable_conv_block(inputs,
                                  filters=filters,
                                  kernel_size=kernel_size,
                                  strides=strides)
    inputs = relu()(inputs)
    return inputs
Example 3
 def activate(self, param):
     inp = self.result
     with tf.name_scope('activation_' + str(self.layernum)):
         if param == 0:
             res = L.relu(inp, name='relu_' + str(self.layernum))
         elif param == 1:
             res = L.lrelu(inp, name='lrelu_' + str(self.layernum))
         elif param == 2:
             res = L.elu(inp, name='elu_' + str(self.layernum))
         elif param == 3:
             res = L.tanh(inp, name='tanh_' + str(self.layernum))
         elif param == 4:
             self.inpsize[-1] = self.inpsize[-1] // 2
             res = L.MFM(inp,
                         self.inpsize[-1],
                         name='mfm_' + str(self.layernum))
         elif param == 5:
             self.inpsize[-1] = self.inpsize[-1] // 2
             res = L.MFMfc(inp,
                           self.inpsize[-1],
                           name='mfm_' + str(self.layernum))
         elif param == 6:
             res = L.sigmoid(inp, name='sigmoid_' + str(self.layernum))
         else:
             res = inp
     self.result = res
     return self.result
Example 4
 def __init__(self):
     self.lr = 0.01
     # conv net
     self.c1 = conv(1, 6, kernel=5, learning_rate=self.lr)
     self.relu1 = relu()
     self.s2 = max_pool(kernel=2, stride=2)
     self.c3 = conv(6, 16, kernel=5, learning_rate=self.lr)
     self.relu3 = relu()
     self.s4 = max_pool(kernel=2, stride=2)
     self.c5 = conv(16, 120, kernel=4, learning_rate=self.lr)
     self.relu5 = relu()
     # fc net
     self.f6 = fc(120, 84, learning_rate=self.lr)
     self.relu6 = relu()
     self.f7 = fc(84, 10)
     self.sig7 = softmax()
     # record the shape between the conv net and fc net
     self.conv_out_shape = None
Example 5
    def classifier_aux(self, inputs, classes):
        filters = 128
        outputs = average_pooling2d(pool_size=(5, 5),
                                    strides=3,
                                    padding='valid')(inputs)
        outputs = conv2d(filters=filters,
                         kernel_size=(1, 1),
                         strides=1,
                         padding='same')(outputs)
        outputs = flatten()(outputs)
        outputs = relu()(outputs)
        outputs = dense(1024)(outputs)
        outputs = relu()(outputs)
        outputs = dropout(0.7)(outputs)
        outputs = dense(classes)(outputs)
        outputs = softmax()(outputs)

        return outputs
Example 6
	def deploy(self,input_layer):
		with tf.variable_scope('RPN_',reuse=self.reuse):
			shared_feature = L.conv2D(input_layer,3,512,stride=self.anchor_stride,name='share_conv')
			shared_feature = L.relu(shared_feature,'share_relu')
			rpn_bf_logits = L.conv2D(shared_feature,1,2*self.anchors_per_loc,'bf')
			rpn_bf_logits = tf.reshape(rpn_bf_logits,[tf.shape(rpn_bf_logits)[0],-1,2])
			rpn_bf_prob = tf.nn.softmax(rpn_bf_logits)
			rpn_bbox = L.conv2D(shared_feature,1,4*self.anchors_per_loc,'bbox')
			rpn_bbox = tf.reshape(rpn_bbox,[tf.shape(rpn_bbox)[0],-1,4])
		self.reuse = True
		return rpn_bf_logits, rpn_bf_prob, rpn_bbox
Example 7
    def __init__(self, learning_rate, input_shape,
                 BS):  #input_shape example: [BS,1,28,28]
        self.lr = learning_rate

        self.conv2d_1 = ly.conv2d(input_shape, [5, 5, 1, 6], [1, 1], 'VALID')
        self.relu_1 = ly.relu()

        self.conv2d_2 = ly.conv2d([BS, 6, 12, 12], [3, 3, 6, 10], [2, 2],
                                  'VALID')
        self.relu_2 = ly.relu()

        self.flatter = ly.flatter()

        self.full_connect_1 = ly.full_connect(250, 84)
        self.relu_3 = ly.relu()
        self.dropout = ly.dropout(lenth=84)

        self.full_connect_2 = ly.full_connect(84, 10)

        self.loss_func = ly.softmax_cross_entropy_error()
Example 8
    def __init__(self, input_shape, learning_rate=0.001):
        BS = input_shape[0]
        self.lr = learning_rate
        #conv1 : 1*28*28->6*12*12
        self.conv1 = ly.conv2d(input_shape, [5, 5, 1, 6], [1, 1], 'same')
        self.conv1_relu = ly.relu()
        self.pool1 = ly.max_pooling(self.conv1.out_shape, [3, 3], [2, 2],
                                    'valid')
        # conv2 : 6*12*12 - > 10*5*5
        self.conv2 = ly.conv2d(self.pool1.out_shape, [3, 3, 6, 10], [1, 1],
                               'same')
        self.conv2_relu = ly.relu()
        self.pool2 = ly.max_pooling(self.conv2.out_shape, [3, 3], [2, 2],
                                    'valid')

        self.conv_fc = ly.conv_fc()
        self.fc1 = ly.full_connect(360, 84)
        self.fc1_relu = ly.relu()
        self.fc2 = ly.full_connect(84, 10)
        self.loss = ly.softmax_cross_with_entropy()
Example 9
    def transition_block(self, inputs):
        filters = keras.backend.int_shape(inputs)[
            self.batch_normalization_axis]
        if self.compression:
            filters = int(filters * (1 - self.reduction_rate))

        outputs = batch_normalization()(inputs)
        outputs = relu()(outputs)
        outputs = conv2d(filters=filters, kernel_size=(1, 1))(outputs)
        outputs = average_pooling2d(pool_size=(2, 2), strides=2)(outputs)

        return outputs
Example 10
    def discriminator(self, name, input, is_training=True):
        with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
            input = tf.pad(input, [[0, 0], [1, 1], [1, 1], [0, 0]])
            input = ly.conv2d(input,
                              64,
                              strides=2,
                              kernal_size=4,
                              padding='VALID',
                              name='d_conv2d_0')
            input = ly.relu(input, alpha=0.2)

            input = tf.pad(input, [[0, 0], [1, 1], [1, 1], [0, 0]])
            input = ly.conv2d(input, 128, strides=2, name='d_conv2d_1')
            input = ly.batch_normal(input,
                                    name='d_bn1',
                                    is_training=is_training)
            input = ly.relu(input, alpha=0.2)

            ### 31 | 15
            input = tf.pad(input, [[0, 0], [1, 1], [1, 1], [0, 0]])
            input = ly.conv2d(input,
                              256,
                              strides=1,
                              name='d_conv2d_2',
                              padding='VALID')
            input = ly.batch_normal(input,
                                    name='d_bn2',
                                    is_training=is_training)
            input = ly.relu(input, alpha=0.2)

            ### 30 | 14
            input = tf.pad(input, [[0, 0], [1, 1], [1, 1], [0, 0]])
            input = ly.conv2d(input,
                              1,
                              strides=1,
                              name='d_conv2d_3',
                              padding='VALID')

            return tf.nn.sigmoid(input)
Example 11
    def test_relu_forward(this):
        # Test the relu forward method.

        x = np.random.randint(low=-10, high=10, size=(30, 1))
        layer = layers.relu()
        y = layer.forward(x)
        y_exp = x.copy()
        y_exp[x < 0] = 0
        nptest.assert_array_equal(
            y,
            y_exp,
            err_msg="relu layer forward incorrect for \n x = \n {} \n".format(
                x))
Example 12
 def net(self, X, reuse=None):
     with tf.variable_scope('EyeNet', reuse=reuse):
         conv1 = conv2d(X,output_dims=20,k_h=5,k_w=5,s_h=1,s_w=1,padding='VALID',name='conv1')   
         pool1 = max_pool(conv1,k_h=2,k_w=2,s_h=2,s_w=2,padding='SAME',name='pool1')
         conv2 = conv2d(pool1,output_dims=50,k_h=5,k_w=5,s_h=1,s_w=1,padding='VALID',name='conv2')              
         pool2 = max_pool(conv2,k_h=2,k_w=2,s_h=2,s_w=2,padding='SAME',name='pool2') 
         flatten = tf.reshape(pool2,[-1, pool2.get_shape().as_list()[1]
                                         *pool2.get_shape().as_list()[2]
                                         *pool2.get_shape().as_list()[3]], name='conv_reshape')
         fc1 = fc(flatten, output_dims=500, name='fc1')
         relu1 = relu(fc1, name='relu1')
         out = fc(relu1, output_dims=2, name='output')
         return out
Example 13
    def test_relu_x_gradient(this):
        # Test the x gradient of relu.

        x = np.random.randint(low=-10, high=10, size=(30, 1))
        layer = layers.relu()
        dydx = layer.x_gradient(x)
        idx = (layer.forward(x) != x)
        dydx_exp = np.eye(30)
        dydx_exp[idx[:, 0], idx[:, 0]] = 0
        nptest.assert_array_equal(
            dydx,
            dydx_exp,
            err_msg="relu layer x gradient incorrect for \n x = \n {} \n".
            format(x))
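
The two tests above exercise a relu layer with a forward pass and an x_gradient method returning the Jacobian for a column-vector input. A minimal NumPy sketch that would satisfy them (the class and method names are taken from the tests; the real layers.relu may differ):

import numpy as np

class relu:
    def forward(self, x):
        # Elementwise max(0, x); returns a new array without modifying the input.
        return np.maximum(x, 0)

    def x_gradient(self, x):
        # Jacobian dy/dx for a column vector: identity with zeroed diagonal
        # entries wherever the corresponding input was negative.
        neg = (np.asarray(x) < 0)[:, 0]
        jac = np.eye(x.shape[0])
        jac[neg, neg] = 0
        return jac

    def get_training_parameters(self):
        # ReLU has no trainable parameters (see the test further below).
        return {}
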
Example 14
def net(name,image,output0,trainable = True,reuse = None):
    with tf.variable_scope(name,reuse=reuse):
        params = []
        #conv bias 42->40
        conv1,k1 = layers.conv(name + "conv1",image,3,3,1,1,"VALID",1,16,trainable)
        bias1,b1 = layers.bias(name + "bias1",conv1,16,trainable)
        relu1 = layers.relu(name + "relu1",bias1)
        params += [k1,b1]
        #pool 40->20
        pool1 = layers.pooling(name + "pool1",relu1,2,2,2,2)
        #conv bias 20->20
        conv2,k2 = layers.conv(name + "conv2",pool1,3,3,1,1, "SAME",16,32,trainable)
        bias2,b2 = layers.bias(name + "bias2",conv2,32,trainable)
        relu2 = layers.relu(name + "relu2",bias2)
        params += [k2,b2]
        #conv bias 20->20
        conv2_,k2_ = layers.conv(name + "conv2_",relu2,3,3,1,1, "SAME",32,32,trainable)
        bias2_,b2_ = layers.bias(name + "bias2_",conv2_,32,trainable)
        relu2_ = layers.relu(name + "relu2_",bias2_)
        params += [k2_,b2_]
        #pool 20->10
        pool2 = layers.pooling(name + "pool2",relu2_,2,2,2,2)
        #conv bias 10->10
        conv3,k3 = layers.conv(name + "conv3",pool2,3,3,1,1,"SAME",32,64,trainable)
        bias3,b3 = layers.bias(name + "bias3",conv3,64,trainable)
        relu3 = layers.relu(name + "relu3",bias3)
        params += [k3,b3]
        #conv bias 10->10
        conv3_,k3_ = layers.conv(name + "conv3_",relu3,3,3,1,1,"SAME",64,64,trainable)
        bias3_,b3_ = layers.bias(name + "bias3_",conv3_,64,trainable)
        relu3_ = layers.relu(name + "relu3_",bias3_)
        params += [k3_,b3_]
        #pool 10->5
        pool3 = layers.pooling(name + "pool3",relu3_,2,2,2,2)
        #conv4 5->3
        conv4,k4 = layers.conv(name + "conv4",pool3,3,3,1,1,"VALID",64,128,trainable)
        bias4,b4 = layers.bias(name + "bias4",conv4,128,trainable)
        relu4 = layers.relu(name + "relu4",bias4)
        params += [k4,b4]
        #conv5 3->1
        conv5,k5 = layers.conv(name + "conv5",relu4,3,3,1,1,"VALID",128,128,trainable)
        bias5,b5 = layers.bias(name + "bias5",conv5,128,trainable)
        relu5 = layers.relu(name + "relu5",bias5)
        params += [k5,b5]
        #fcn
        feature0,dim0 = layers.reshapeToLine(relu5)
        fcn1,k6  = layers.fcn(name + "fcn1",feature0,dim0,output0,trainable)
        fcn1_bias,b6 = layers.bias(name + "fcn1_bias",fcn1,output0,trainable)
        params += [k6,b6]

        return fcn1_bias,params
Example 15
 def forward(self, x, weights=None, prefix=''):
     '''
     Runs the net forward; if weights is None it uses 'self' layers,
     otherwise keeps the structure and uses 'weights' instead.
     '''
     if weights is None:
         x = self.features(x)
         x = self.fa(x)
     else:
         for i in range(self.num_layers):
             x = linear(x, weights[prefix + 'fc' + str(i) + '.weight'],
                        weights[prefix + 'fc' + str(i) + '.bias'])
             if i < self.num_layers - 1: x = relu(x)
         x = self.fa(x)
     return x
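
The docstring above describes a functional forward pass where the parameters are supplied explicitly rather than read from the module, a pattern common in meta-learning inner loops. A minimal sketch of the linear and relu helpers it relies on, assuming PyTorch (both are thin wrappers over torch.nn.functional):

import torch.nn.functional as F

def linear(x, weight, bias):
    # Affine layer applied with externally supplied weight and bias tensors.
    return F.linear(x, weight, bias)

def relu(x):
    # Elementwise non-linearity used between the hidden layers.
    return F.relu(x)

# Hypothetical usage with an explicit weight dict (key names assumed from the
# prefix + 'fc<i>.weight' / '.bias' lookups in forward above):
# weights = {'fc0.weight': w0, 'fc0.bias': b0, 'fc1.weight': w1, 'fc1.bias': b1}
# out = net.forward(x, weights=weights, prefix='')
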
Example 16
File: ResNet.py Project: iiharu/NN
 def __init__(
         self,
         blocks,
         filters,
         bottleneck=False,
         input_layers=[
             batch_normalization(),
             relu(),
             conv2d(filters=64, kernel_size=(7, 7), strides=2),
         ],
         output_layers=[average_pooling2d(pool_size=(2, 2)),
                        flatten()]):
     self.blocks = blocks
     self.filters = filters
     self.bottleneck = bottleneck
      self.bn_axis = -1 if keras.backend.image_data_format() == 'channels_last' else 1
     self.input_layers = input_layers
     self.output_layer = output_layers
Example 17
def forward_propagate(x, y, _weights, debug=True):

    activation_caches = {}

    m = x.shape[0]

    activation_caches["conv1"] = conv_fast(x, _weights["W1"], _weights["B1"],
                                           2, 1)
    activation_caches["A1"] = relu(activation_caches["conv1"])
    activation_caches["pool1"] = max_pooling(activation_caches["A1"], 2, 2)

    # Sanity check to make sure that our convolution vectorization is correct

    if debug:
        # Conv

        kconv, kcache = karpathy_conv_forward_naive(x, _weights["W1"],
                                                    _weights["B1"], {
                                                        'stride': 1,
                                                        'pad': 2
                                                    })
        assert np.mean(np.isclose(activation_caches["conv1"], kconv)) == 1.0

        conv1_verify = conv_forward_naive(x, _weights["W1"], _weights["B1"], 2,
                                          1)
        assert np.mean(np.isclose(activation_caches["conv1"],
                                  conv1_verify)) == 1.0

        kpool1, kcache1 = karpathy_max_pool_forward_naive(
            activation_caches["A1"], {
                'pool_height': 2,
                'pool_width': 2,
                'stride': 2
            })
        assert np.mean(np.isclose(activation_caches["pool1"], kpool1)) == 1.0

    activation_caches["conv2"] = conv_fast(activation_caches["pool1"],
                                           _weights["W2"], _weights["B2"], 2,
                                           1)
    activation_caches["A2"] = relu(activation_caches["conv2"])
    activation_caches["pool2"] = max_pooling(activation_caches["A2"], 2, 2)
    activation_caches["Ar2"] = activation_caches["pool2"].reshape(
        (m, activation_caches["pool2"].shape[1] *
         activation_caches["pool2"].shape[2] *
         activation_caches["pool2"].shape[3]))

    if debug:
        conv2_verify = conv_forward_naive(activation_caches["pool1"],
                                          _weights["W2"], _weights["B2"], 2, 1)
        assert np.mean(np.isclose(activation_caches["conv2"],
                                  conv2_verify)) == 1.0

    activation_caches["Z3"] = fully_connected(activation_caches["Ar2"],
                                              _weights["W3"], _weights["B3"])
    activation_caches["A3"] = relu(activation_caches["Z3"])

    activation_caches["Z4"] = fully_connected(activation_caches["A3"],
                                              _weights["W4"], _weights["B4"])
    activation_caches["A4"] = softmax(activation_caches["Z4"])

    cost = np.mean(softmax_cost(y, activation_caches["A4"], m))

    return activation_caches, cost
Example 18
    def build(self, input_shape, classes):

        inputs = keras.Input(shape=input_shape)

        # Entry flow
        filters = 32
        outputs = conv_relu_block(inputs, filters=filters, strides=(2, 2))

        filters = 64
        outputs = conv_relu_block(outputs, filters=filters)

        residual = outputs

        filters = 128
        outputs = separable_conv_block(outputs, filters=filters)
        outputs = relu_separable_conv_block(outputs, filters=filters)
        outputs = max_pooling2d(pool_size=(3, 3), strides=(2, 2))(outputs)
        residual = conv_block(residual,
                              filters=filters,
                              kernel_size=(1, 1),
                              strides=(2, 2))
        outputs = add()([outputs, residual])

        filters = 256
        outputs = relu_separable_conv_block(outputs, filters=filters)
        outputs = relu_separable_conv_block(outputs, filters=filters)
        outputs = max_pooling2d(pool_size=(3, 3), strides=(2, 2))(outputs)
        residual = conv_block(residual,
                              filters=filters,
                              kernel_size=(1, 1),
                              strides=(2, 2))
        outputs = add()([outputs, residual])

        filters = 728
        outputs = relu_separable_conv_block(outputs, filters=filters)
        outputs = relu_separable_conv_block(outputs, filters=filters)
        outputs = max_pooling2d(pool_size=(3, 3), strides=(2, 2))(outputs)
        residual = conv_block(residual,
                              filters=filters,
                              kernel_size=(1, 1),
                              strides=(2, 2))
        outputs = add()([outputs, residual])

        # Middle flow
        filters = 728
        for _ in range(8):
            residual = outputs

            outputs = relu_separable_conv_block(outputs, filters=filters)
            outputs = relu_separable_conv_block(outputs, filters=filters)
            outputs = relu_separable_conv_block(outputs, filters=filters)
            outputs = add()([outputs, residual])

        # Exit flow
        residual = outputs
        outputs = relu_separable_conv_block(outputs, filters=filters)

        filters = 1024
        outputs = relu_separable_conv_block(outputs, filters=filters)
        outputs = add()([outputs, residual])

        filters = 1536
        outputs = separable_conv_relu_block(outputs, filters=filters)

        filters = 2048
        outputs = separable_conv_relu_block(outputs, filters=filters)

        outputs = global_average_pooling2d()(outputs)

        outputs = dense(filters)(outputs)
        outputs = relu()(outputs)

        outputs = dense(classes)(outputs)
        outputs = softmax()(outputs)

        model = keras.Model(inputs, outputs)

        model.summary()

        return model
Example 19
# Number of classes
K = 10
# Dimensions of input
num_examples, Xh, Xw = X_train.shape
Xd = 1

###########################
# Multilayer Perceptron classifier
###########################

# Initialise parameters
cnn1 = cnn2d(input_shape=(Xh,Xw,Xd), filter_shape=(f1_field, f1_field), num_filters=h1_units)
def flatten(x, n_examples):
    return np.reshape(x, (n_examples,-1))
relu1 = relu()
fc2 = fc2d(cnn1.yw*cnn1.yh*Xd*h1_units, K)
data_loss_fn = softmax_loss(y_train)

for i in range(n_epochs):
    # Forward pass
    conv1 = cnn1.forward(X_train)
    print("conv1:", conv1.shape)
    flatten1 = flatten(conv1, num_examples)
    h1 = relu1.forward(flatten1)
    scores = fc2.forward(h1)
    data_loss = data_loss_fn.forward(scores)
    reg_loss = 0.5*reg*(np.sum(cnn1.W * cnn1.W) + np.sum(fc2.W*fc2.W))
    loss = data_loss + reg_loss
    if i % 1 == 0:
        print("Epoch: %d, Loss: %f" % (i, loss))
Example 21
File: VGG.py Project: iiharu/NN
 def conv_block(self, inputs, filters, blocks):
     for _ in range(blocks):
         inputs = conv2d(filters=filters, kernel_size=(3, 3))(inputs)
         inputs = relu()(inputs)
     return inputs
Example 22
 def test_get_training_parameters(this):
     # Test the get_training_parameters method.
     layer = layers.relu()
     params = layer.get_training_parameters()
     this.assertDictEqual(params, {})
Example 23
    def generator(self, name, input, is_training=True):
        with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
            ### 3 -> 64 64
            e0 = ly.conv2d(input, 64, strides=2, name='g_conv2d_0')
            e0 = ly.batch_normal(e0, name='g_bn_0', is_training=is_training)
            e0 = ly.relu(e0, alpha=0.2)

            ### 64 -> 128 32
            e1 = ly.conv2d(e0, 128, strides=2, name='g_conv2d_1')
            e1 = ly.batch_normal(e1, name='g_bn_1', is_training=is_training)
            e1 = ly.relu(e1, alpha=0.2)

            ### 128 -> 256 16
            e2 = ly.conv2d(e1, 256, strides=2, name='g_conv2d_2')
            e2 = ly.batch_normal(e2, name='g_bn_2', is_training=is_training)
            e2 = ly.relu(e2, alpha=0.2)

            ### 256 -> 512 8
            e3 = ly.conv2d(e2, 512, strides=2, name='g_conv2d_3')
            e3 = ly.batch_normal(e3, name='g_bn_3', is_training=is_training)
            e3 = ly.relu(e3, alpha=0.2)

            ### 512 -> 512 4
            e4 = ly.conv2d(e3, 512, strides=2, name='g_conv2d_4')
            e4 = ly.batch_normal(e4, name='g_bn_4', is_training=is_training)
            e4 = ly.relu(e4, alpha=0.2)

            ### 512 -> 512 2
            e5 = ly.conv2d(e4, 512, strides=2, name='g_conv2d_5')
            e5 = ly.batch_normal(e5, name='g_bn_5', is_training=is_training)
            e5 = ly.relu(e5, alpha=0.2)

            ### 512 -> 512 4
            d1 = ly.deconv2d(e5, 512, strides=2, name='g_deconv2d_1')
            d1 = ly.batch_normal(d1, name='g_bn_6', is_training=is_training)
            d1 = tf.nn.dropout(d1, keep_prob=0.5)
            d1 = tf.concat([d1, e4], axis=3)
            d1 = ly.relu(d1, alpha=0.2)

            ### 512 -> 512 8
            d2 = ly.deconv2d(d1, 512, strides=2, name='g_deconv2d_2')
            d2 = ly.batch_normal(d2, name='g_bn_7', is_training=is_training)
            d2 = tf.nn.dropout(d2, keep_prob=0.5)
            d2 = ly.relu(d2, alpha=0.2)
            d2 = tf.concat([d2, e3], axis=3)

            ### 512 -> 256 16
            d3 = ly.deconv2d(d2, 256, strides=2, name='g_deconv2d_3')
            d3 = ly.batch_normal(d3, name='g_bn_8', is_training=is_training)
            d3 = ly.relu(d3, alpha=0.2)
            d3 = tf.concat([d3, e2], axis=3)

            ### 256 -> 128 32
            d4 = ly.deconv2d(d3, 128, strides=2, name='g_deconv2d_4')
            d4 = ly.batch_normal(d4, name='g_bn_9', is_training=is_training)
            d4 = ly.relu(d4, alpha=0.2)
            d4 = tf.concat([d4, e1], axis=3)

            ### 128 -> 64 64
            d5 = ly.deconv2d(d4, 64, strides=2, name='g_deconv2d_5')
            d5 = ly.batch_normal(d5, name='g_bn_10', is_training=is_training)
            d5 = ly.relu(d5, alpha=0.2)
            d5 = tf.concat([d5, e0], axis=3)

            ### 64 -> 3 128
            d6 = ly.deconv2d(d5, 3, strides=2, name='g_deconv2d_6')
            d6 = ly.batch_normal(d6, name='g_bn_11', is_training=is_training)
            d6 = ly.relu(d6, alpha=0.2)

            return tf.nn.tanh(d6)
Example 24
import unittest
    def test_constructor(this):

        # Test the constructor
        layer = layers.relu()
        this.assertIsInstance(layer, layers.relu)