def gen(Z, w1, g1, b1, w2, g2, b2, w3, g3, b3, w4, g4, b4, wx):
    """Generator: project latent Z and upsample through four 3-D deconvs.

    Z is projected with w1, batchnormed + relu'd, reshaped to a 5-D volume,
    passed through three relu(batchnorm(deconv)) stages (w2..w4 with their
    g/b batchnorm params) and a final sigmoid deconv (wx).

    Relies on the module-level Channel / Convlayersize / kernal tables and
    the relu / batchnorm / conv / sigmoid helpers defined elsewhere in
    this file.
    """
    # Dense projection into the coarsest feature volume.
    h = relu(batchnorm(T.dot(Z, w1), g=g1, b=b1))
    side = Convlayersize[-1]
    h = h.reshape((h.shape[0], Channel[-1], side, side, side))

    # Hidden deconv stages: (weight, gamma, beta, index-from-the-end).
    # Stage i maps Channel[-i] -> Channel[-i-1] with kernel kernal[-i]
    # from a Convlayersize[-i]^3 input volume.
    for w, g, b, i in ((w2, g2, b2, 1), (w3, g3, b3, 2), (w4, g4, b4, 3)):
        in_side = Convlayersize[-i]
        k = kernal[-i]
        h = relu(
            batchnorm(conv(h,
                           w,
                           filter_shape=(Channel[-i], Channel[-i - 1],
                                         k, k, k),
                           input_shape=(None, None,
                                        in_side, in_side, in_side),
                           conv_mode='deconv'),
                      g=g,
                      b=b))

    # Output stage: deconv + sigmoid, no batchnorm.
    out_side = Convlayersize[-4]
    k = kernal[-4]
    return sigmoid(
        conv(h,
             wx,
             filter_shape=(Channel[-4], Channel[-5], k, k, k),
             input_shape=(None, None, out_side, out_side, out_side),
             conv_mode='deconv'))
def encoder(X, w1, g1, b1, w2, g2, b2, w3, g3, b3, w4, g4, b4, wz):
    """Encoder: four 3-D conv stages, then a sigmoid latent projection.

    Each stage applies lrelu(batchnorm(conv(...))) with weight wi and
    batchnorm params gi/bi; the result is flattened to (batch, features)
    and mapped through wz with a sigmoid.  Uses the module-level Channel
    and kernal tables and the lrelu / batchnorm / conv / sigmoid / T
    helpers defined elsewhere in this file.
    """
    h = X
    # Stage idx maps Channel[idx] -> Channel[idx + 1] with kernel kernal[idx].
    for idx, (w, g, b) in enumerate(((w1, g1, b1), (w2, g2, b2),
                                     (w3, g3, b3), (w4, g4, b4))):
        k = kernal[idx]
        shape = (Channel[idx + 1], Channel[idx], k, k, k)
        h = lrelu(batchnorm(conv(h, w, filter_shape=shape), g=g, b=b))

    # Flatten to 2-D and project onto the latent code in (0, 1).
    return sigmoid(T.dot(T.flatten(h, 2), wz))
    def gruCNN_manual(self, X, h_t0, index):
        """One convolutional-GRU step combining input X with state h_t0.

        Creates the six gate weights inside variable scope 'block{index}'
        and returns a 2-tuple whose elements are BOTH the activated state
        o_t.  NOTE(review): returning the same tensor twice looks like it
        may have been intended as (o_t, h_t) — confirm against callers.
        """
        ksize = self.filter_size_gruCNN
        n_ch = self.num_channel_gruCNN
        # First block sees a single-channel input; later blocks consume
        # the previous block's n_ch-channel output.
        in_ch = 1 if index == 0 else n_ch

        with tf.variable_scope('block{}'.format(index)):
            def gate(name_x, name_h, x_in, h_in):
                # conv(x, W_*x) + conv(h, W_*h) with fresh scoped weights.
                wx = get_weight_variable(name_x,
                                         (ksize[0], ksize[1], in_ch, n_ch))
                wh = get_weight_variable(name_h,
                                         (ksize[0], ksize[1], n_ch, n_ch))
                return conv(x_in, wx) + conv(h_in, wh)

            z_t = tf.sigmoid(gate('W_zx', 'W_zh', X, h_t0))  # update gate
            r_t = tf.sigmoid(gate('W_rx', 'W_rh', X, h_t0))  # reset gate
            # Candidate state uses the reset-gated previous state.
            h_t_hat = tf.tanh(gate('W_hx', 'W_hh',
                                   X, tf.multiply(r_t, h_t0)))

            # Convex blend of old state and candidate, then activation.
            h_t = tf.multiply(z_t, h_t0) + tf.multiply(1 - z_t, h_t_hat)
            o_t = self.parametric_relu(h_t, n_ch)

        return o_t, o_t