Example #1
def discriminator_o(x, hidden_units_d, reuse=False, parameters=None):

    with tf.compat.v1.variable_scope("discriminator_0") as scope:

        W_out_D_initializer = tf.compat.v1.constant_initializer(
            value=parameters['discriminator/W_out_D:0'])
        b_out_D_initializer = tf.compat.v1.constant_initializer(
            value=parameters['discriminator/b_out_D:0'])

        W_out_D = tf.compat.v1.get_variable(name='W_out_D',
                                            shape=[hidden_units_d, 1],
                                            initializer=W_out_D_initializer)
        b_out_D = tf.compat.v1.get_variable(name='b_out_D',
                                            shape=1,
                                            initializer=b_out_D_initializer)

        inputs = x

        # cell = tf.contrib.rnn.LSTMCell(num_units=hidden_units_d, state_is_tuple=True, reuse=reuse)
        cell = LSTMCell(num_units=hidden_units_d,
                        state_is_tuple=True,
                        reuse=reuse)
        rnn_outputs, rnn_states = tf.compat.v1.nn.dynamic_rnn(cell=cell,
                                                              dtype=tf.float32,
                                                              inputs=inputs)
        logits = tf.compat.v1.einsum('ijk,km', rnn_outputs,
                                     W_out_D) + b_out_D  # per-timestep weighted sum of the RNN outputs
        output = tf.compat.v1.nn.sigmoid(
            logits
        )  # sigmoid squashes the logits to [0, 1]: per-timestep probability that the input is real

    return output, logits
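All of these snippets build a static TensorFlow 1.x graph (partly through tf.compat.v1) and call an LSTMCell that accepts a bias_start argument, which the stock TF cell does not; a project-local modified cell is evidently assumed. A minimal, hedged setup sketch (the custom-cell import path is an assumption):

# Minimal graph-mode setup these snippets appear to assume (a sketch, not part of the originals).
import tensorflow as tf

tf.compat.v1.disable_eager_execution()  # the functions below build a static TF1-style graph

# The snippets call LSTMCell(..., bias_start=...); the stock TF cell has no bias_start
# argument, so a project-local modified cell is assumed. The import below is hypothetical;
# the stock cell can only stand in if bias_start is dropped from the calls.
try:
    from mod_core_rnn_cell_impl import LSTMCell   # assumed custom cell exposing bias_start
except ImportError:
    LSTMCell = tf.compat.v1.nn.rnn_cell.LSTMCell  # stock cell: no bias_start parameter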
Example #2
def discriminator(x, hidden_units_d, seq_length, batch_size, reuse=False, parameters=None, batch_mean=False):       # --> Mine; this is the version that produced parameters_1
    with tf.compat.v1.variable_scope("discriminator") as scope:
        if reuse:
            scope.reuse_variables()
        if parameters is None:
            # W_out_D = tf.get_variable(name='W_out_D', shape=[hidden_units_d, 1], initializer=tf.truncated_normal_initializer())
            # b_out_D = tf.get_variable(name='b_out_D', shape=1, initializer=tf.truncated_normal_initializer())
            W_out_D_initializer = tf.compat.v1.glorot_normal_initializer()    # Xavier Initializer
            b_out_D_initializer = tf.compat.v1.glorot_normal_initializer()    # Xavier Initializer
            lstm_initializer = None
            bias_start = 1.0
        else:
            W_out_D_initializer = tf.compat.v1.constant_initializer(value=parameters['discriminator/W_out_D:0'])
            b_out_D_initializer = tf.compat.v1.constant_initializer(value=parameters['discriminator/b_out_D:0'])
            lstm_initializer = tf.compat.v1.constant_initializer(value=parameters['discriminator/rnn/lstm_cell/weights:0'])
            bias_start = parameters['discriminator/rnn/lstm_cell/biases:0']

        # build the output-layer variables from whichever initializers were selected above
        W_out_D = tf.compat.v1.get_variable(name='W_out_D', shape=[hidden_units_d, 1], initializer=W_out_D_initializer)
        b_out_D = tf.compat.v1.get_variable(name='b_out_D', shape=1, initializer=b_out_D_initializer)

        inputs = x

        if batch_mean:
            mean_over_batch = tf.compat.v1.stack([tf.compat.v1.reduce_mean(x, axis=0)] * batch_size, axis=0)
            inputs = tf.compat.v1.concat([x, mean_over_batch], axis=2)

        # cell = tf.contrib.rnn.LSTMCell(num_units=hidden_units_d, state_is_tuple=True, reuse=reuse)
        cell = LSTMCell(num_units=hidden_units_d, state_is_tuple=True, initializer=lstm_initializer, bias_start=bias_start, reuse=reuse)
        rnn_outputs, rnn_states = tf.compat.v1.nn.dynamic_rnn(cell=cell, dtype=tf.float32, inputs=inputs)
        logits = tf.compat.v1.einsum('ijk,km', rnn_outputs, W_out_D) + b_out_D # output weighted sum
        output = tf.compat.v1.nn.sigmoid(logits)
        # output = tf.nn.relu(logits)
    return output, logits
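The discriminator returns both the sigmoid output and the raw logits; the logits are what you would typically feed into the numerically stable sigmoid cross-entropy. A hedged sketch of wiring the discriminator above into GAN losses (placeholder shapes and hyperparameters are assumptions):

# Hedged sketch: per-timestep GAN losses built on the discriminator defined above.
import tensorflow as tf

batch_size, seq_length, num_signals, hidden_units_d = 28, 30, 1, 100  # assumed sizes
X = tf.compat.v1.placeholder(tf.float32, [batch_size, seq_length, num_signals])       # real sequences
X_fake = tf.compat.v1.placeholder(tf.float32, [batch_size, seq_length, num_signals])  # generated sequences

D_real, D_logits_real = discriminator(X, hidden_units_d, seq_length, batch_size)
D_fake, D_logits_fake = discriminator(X_fake, hidden_units_d, seq_length, batch_size, reuse=True)

# cross-entropy on logits is more stable than taking log(sigmoid(...)) explicitly
d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    labels=tf.ones_like(D_logits_real), logits=D_logits_real))
d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    labels=tf.zeros_like(D_logits_fake), logits=D_logits_fake))
d_loss = d_loss_real + d_loss_fake
g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    labels=tf.ones_like(D_logits_fake), logits=D_logits_fake))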
Example #3
def encoderModel(z, hidden_units, seq_length, batch_size, latent_dim, reuse=False, parameters=None):
    with tf.compat.v1.variable_scope('encoder') as scope:
        if parameters is not None:
            W_initializer = tf.compat.v1.constant_initializer(value=parameters['encoder/W:0'])
            b_initializer = tf.compat.v1.constant_initializer(value=parameters['encoder/b:0'])
            lstm_initializer = tf.compat.v1.constant_initializer(value=parameters['encoder/rnn/lstm_cell/weights:0'])
            bias_start = parameters['encoder/rnn/lstm_cell/biases:0']
            W = tf.compat.v1.get_variable(name='W', shape=[hidden_units, latent_dim], initializer=W_initializer, trainable=False)
            b = tf.compat.v1.get_variable(name='b', shape=latent_dim, initializer=b_initializer, trainable=False)
        
        else:
            W_initializer = tf.compat.v1.truncated_normal_initializer()
            b_initializer = tf.compat.v1.truncated_normal_initializer()
            lstm_initializer = None
            bias_start = 1.0
            W = tf.compat.v1.get_variable(name='W', shape=[hidden_units, latent_dim], initializer=W_initializer, trainable=True)
            b = tf.compat.v1.get_variable(name='b', shape=latent_dim, initializer=b_initializer, trainable=True)

        inputs = z
        cell = LSTMCell(num_units=hidden_units, state_is_tuple=True, initializer=lstm_initializer, bias_start=bias_start, reuse=reuse)
        rnn_outputs, rnn_states = tf.compat.v1.nn.dynamic_rnn(cell=cell,dtype=tf.float32, sequence_length=[seq_length] * batch_size, inputs=inputs)
        rnn_outputs_2d = tf.compat.v1.reshape(rnn_outputs, [-1, hidden_units])
        logits_2d = tf.compat.v1.matmul(rnn_outputs_2d, W) + b  # output weighted sum
        # output_2d = tf.nn.relu(logits_2d) # logits operation [-1, 1]
        # output_2d = tf.compat.v1.nn.tanh(logits_2d)
        output_3d = tf.compat.v1.reshape(logits_2d, [-1, seq_length, latent_dim])
    return output_3d
Example #4
def generator_o(z,
                hidden_units_g,
                seq_length,
                batch_size,
                num_generated_features,
                reuse=False,
                parameters=None,
                learn_scale=True):
    """
    If parameters are supplied, initialise as such
    """
    # It is important to specify different variable scopes for the LSTM cells.
    with tf.variable_scope("generator_o") as scope:

        W_out_G_initializer = tf.constant_initializer(
            value=parameters['generator/W_out_G:0'])
        b_out_G_initializer = tf.constant_initializer(
            value=parameters['generator/b_out_G:0'])
        try:
            scale_out_G_initializer = tf.constant_initializer(
                value=parameters['generator/scale_out_G:0'])
        except KeyError:
            scale_out_G_initializer = tf.constant_initializer(value=1)
            assert learn_scale
        lstm_initializer = tf.constant_initializer(
            value=parameters['generator/rnn/lstm_cell/weights:0'])
        bias_start = parameters['generator/rnn/lstm_cell/biases:0']

        W_out_G = tf.get_variable(
            name='W_out_G',
            shape=[hidden_units_g, num_generated_features],
            initializer=W_out_G_initializer)
        b_out_G = tf.get_variable(name='b_out_G',
                                  shape=num_generated_features,
                                  initializer=b_out_G_initializer)
        scale_out_G = tf.get_variable(name='scale_out_G',
                                      shape=1,
                                      initializer=scale_out_G_initializer,
                                      trainable=False)

        inputs = z

        cell = LSTMCell(num_units=hidden_units_g,
                        state_is_tuple=True,
                        initializer=lstm_initializer,
                        bias_start=bias_start,
                        reuse=reuse)
        rnn_outputs, rnn_states = tf.nn.dynamic_rnn(
            cell=cell,
            dtype=tf.float32,
            sequence_length=[seq_length] * batch_size,
            inputs=inputs)
        rnn_outputs_2d = tf.reshape(rnn_outputs, [-1, hidden_units_g])
        logits_2d = tf.matmul(rnn_outputs_2d,
                              W_out_G) + b_out_G  # output weighted sum
        output_2d = tf.nn.tanh(logits_2d)  # logits operation [-1, 1]
        output_3d = tf.reshape(output_2d,
                               [-1, seq_length, num_generated_features])
    return output_3d
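generator_o maps a latent sequence z to a generated sequence in [-1, 1]. A hedged usage sketch for sampling z and evaluating the graph (latent_dim and the standard-normal prior are assumptions, and generator_o requires a pretrained `parameters` dict because it indexes it unconditionally):

# Hedged usage sketch: sample a latent batch and run the generator defined above.
import numpy as np
import tensorflow as tf

batch_size, seq_length, latent_dim = 28, 30, 5            # assumed sizes
hidden_units_g, num_generated_features = 100, 1           # assumed sizes
Z = tf.compat.v1.placeholder(tf.float32, [batch_size, seq_length, latent_dim])
G_sample = generator_o(Z, hidden_units_g, seq_length, batch_size,
                       num_generated_features, parameters=parameters)  # parameters: pretrained weights

with tf.compat.v1.Session() as sess:
    sess.run(tf.compat.v1.global_variables_initializer())
    z_batch = np.random.normal(size=(batch_size, seq_length, latent_dim)).astype(np.float32)
    samples = sess.run(G_sample, feed_dict={Z: z_batch})  # (batch_size, seq_length, num_generated_features)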
Example #5
def generator(z, hidden_units_g, seq_length, batch_size, num_generated_features, reuse=False, parameters=None, cond_dim=0, c=None, learn_scale=True):
    """
    If parameters are supplied, initialise as such
    """
    with tf.variable_scope("generator") as scope:
        if reuse:
            scope.reuse_variables()
        if parameters is None:
            W_out_G_initializer = tf.truncated_normal_initializer()
            b_out_G_initializer = tf.truncated_normal_initializer()
            scale_out_G_initializer = tf.constant_initializer(value=1.0)
            lstm_initializer = None
            bias_start = 1.0
        else:
            W_out_G_initializer = tf.constant_initializer(value=parameters['generator/W_out_G:0'])
            b_out_G_initializer = tf.constant_initializer(value=parameters['generator/b_out_G:0'])
            try:
                scale_out_G_initializer = tf.constant_initializer(value=parameters['generator/scale_out_G:0'])
            except KeyError:
                scale_out_G_initializer = tf.constant_initializer(value=1)
                assert learn_scale
            lstm_initializer = tf.constant_initializer(value=parameters['generator/rnn/lstm_cell/weights:0'])
            bias_start = parameters['generator/rnn/lstm_cell/biases:0']

        W_out_G = tf.get_variable(name='W_out_G', shape=[hidden_units_g, num_generated_features], initializer=W_out_G_initializer)
        b_out_G = tf.get_variable(name='b_out_G', shape=num_generated_features, initializer=b_out_G_initializer)
        scale_out_G = tf.get_variable(name='scale_out_G', shape=1, initializer=scale_out_G_initializer, trainable=learn_scale)
        if cond_dim > 0:
            # CGAN!
            assert c is not None
            repeated_encoding = tf.stack([c]*seq_length, axis=1)
            inputs = tf.concat([z, repeated_encoding], axis=2)

            #repeated_encoding = tf.tile(c, [1, tf.shape(z)[1]])
            #repeated_encoding = tf.reshape(repeated_encoding, [tf.shape(z)[0], tf.shape(z)[1], cond_dim])
            #inputs = tf.concat([repeated_encoding, z], 2)
        else:
            inputs = z

        cell = LSTMCell(num_units=hidden_units_g,
                        state_is_tuple=True,
                        initializer=lstm_initializer,
                        bias_start=bias_start,
                        reuse=reuse)
        rnn_outputs, rnn_states = tf.nn.dynamic_rnn(
            cell=cell,
            dtype=tf.float32,
            sequence_length=[seq_length]*batch_size,
            inputs=inputs)
        rnn_outputs_2d = tf.reshape(rnn_outputs, [-1, hidden_units_g])
        logits_2d = tf.matmul(rnn_outputs_2d, W_out_G) + b_out_G
#        output_2d = tf.multiply(tf.nn.tanh(logits_2d), scale_out_G)
        output_2d = tf.nn.tanh(logits_2d)
        output_3d = tf.reshape(output_2d, [-1, seq_length, num_generated_features])
    return output_3d
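In the conditional branch (cond_dim > 0) the condition c is tiled across time and concatenated with z along the feature axis, so the LSTM sees [z_t, c] at every timestep. A sketch of feeding a condition into this generator (one-hot integer labels are an assumed conditioning format):

# Hedged sketch: conditioning the generator from Example #5 (CGAN branch).
import tensorflow as tf

batch_size, seq_length, latent_dim = 28, 30, 5             # assumed sizes
hidden_units_g, num_generated_features = 100, 1            # assumed sizes
num_classes = 6                                            # assumed number of condition classes

Z = tf.compat.v1.placeholder(tf.float32, [batch_size, seq_length, latent_dim])
labels = tf.compat.v1.placeholder(tf.int32, [batch_size])
C = tf.one_hot(labels, depth=num_classes)                  # (batch_size, num_classes)

# Inside generator(): C is stacked seq_length times along axis 1 and concatenated with z.
G_sample = generator(Z, hidden_units_g, seq_length, batch_size, num_generated_features,
                     cond_dim=num_classes, c=C)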
Example #6
def generator(z, hidden_units_g, seq_length, batch_size, num_signals, reuse=False, parameters=None, learn_scale=True):

    """
    If parameters are supplied, initialise as such
    """
    with tf.variable_scope("generator") as scope:
        if reuse:
            scope.reuse_variables()
        if parameters is None:
            W_out_G_initializer = tf.truncated_normal_initializer()
            b_out_G_initializer = tf.truncated_normal_initializer()
            scale_out_G_initializer = tf.constant_initializer(value=1.0)
            lstm_initializer = None
            bias_start = 1.0
        else:
            W_out_G_initializer = tf.constant_initializer(value=parameters['generator/W_out_G:0'])
            b_out_G_initializer = tf.constant_initializer(value=parameters['generator/b_out_G:0'])
            try:
                scale_out_G_initializer = tf.constant_initializer(value=parameters['generator/scale_out_G:0'])
            except KeyError:
                scale_out_G_initializer = tf.constant_initializer(value=1)
                assert learn_scale
            lstm_initializer = tf.constant_initializer(value=parameters['generator/rnn/lstm_cell/weights:0'])
            bias_start = parameters['generator/rnn/lstm_cell/biases:0']

        W_out_G = tf.get_variable(name='W_out_G', shape=[hidden_units_g, num_signals],
                                  initializer=W_out_G_initializer)
        b_out_G = tf.get_variable(name='b_out_G', shape=num_signals, initializer=b_out_G_initializer)
        scale_out_G = tf.get_variable(name='scale_out_G', shape=1, initializer=scale_out_G_initializer,
                                      trainable=learn_scale)
        # inputs
        inputs = z #(500,30,15)


        cell = LSTMCell(num_units=hidden_units_g,
                        state_is_tuple=True,
                        initializer=lstm_initializer,
                        bias_start=bias_start,
                        reuse=reuse)
        rnn_outputs, rnn_states = tf.nn.dynamic_rnn( #rnn_outputs.shape = (500,30,100)
            cell=cell,
            dtype=tf.float32,
            sequence_length=[seq_length] * batch_size,
            inputs=inputs)

        rnn_outputs_2d = tf.reshape(rnn_outputs, [-1, hidden_units_g])
        logits_2d = tf.matmul(rnn_outputs_2d, W_out_G) + b_out_G  # output weighted sum
        #        output_2d = tf.multiply(tf.nn.tanh(logits_2d), scale_out_G)
        output_2d = tf.nn.tanh(logits_2d) # logits operation [-1, 1]
        output_3d = tf.reshape(output_2d, [-1, seq_length, num_signals])

    return output_3d
Example #7
def discriminatorModelPred(x, hidden_units_d, reuse=False, parameters=None):
    with tf.compat.v1.variable_scope("discriminator_pred") as scope:
        W_out_D_initializer = tf.compat.v1.constant_initializer(value=parameters['discriminator/W_out_D:0'])
        b_out_D_initializer = tf.compat.v1.constant_initializer(value=parameters['discriminator/b_out_D:0'])
        W_out_D = tf.compat.v1.get_variable(name='W_out_D', shape=[hidden_units_d, 1],  initializer=W_out_D_initializer)
        b_out_D = tf.compat.v1.get_variable(name='b_out_D', shape=1, initializer=b_out_D_initializer)
        lstm_initializer = tf.compat.v1.constant_initializer(value=parameters['discriminator/rnn/lstm_cell/weights:0'])
        bias_start = parameters['discriminator/rnn/lstm_cell/biases:0']

        inputs = x
        cell = LSTMCell(num_units=hidden_units_d, state_is_tuple=True, initializer=lstm_initializer, bias_start=bias_start, reuse=reuse)
        rnn_outputs, rnn_states = tf.compat.v1.nn.dynamic_rnn(cell=cell, dtype=tf.float32, inputs=inputs)
        logits = tf.compat.v1.einsum('ijk,km', rnn_outputs, W_out_D) + b_out_D
        output = tf.compat.v1.nn.sigmoid(logits)
    return output, logits
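Examples like this one load a `parameters` dict keyed by full TF1 variable names such as 'discriminator/W_out_D:0'. One way such a dict can be produced is by evaluating the trained variables in a session; a sketch (the np.save round-trip and filename are illustrative):

# Hedged sketch: capturing a `parameters` dict keyed by names like 'generator/W_out_G:0'.
import numpy as np
import tensorflow as tf

def dump_parameters(sess):
    """Evaluate every trainable variable and key it by its full name (e.g. 'discriminator/b_out_D:0')."""
    variables = tf.compat.v1.trainable_variables()
    values = sess.run(variables)
    return {var.name: value for var, value in zip(variables, values)}

# parameters = dump_parameters(sess)
# np.save('parameters.npy', parameters)                             # illustrative filename
# parameters = np.load('parameters.npy', allow_pickle=True).item()  # restore the dict later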
Example #8
def generatorModel(z, hidden_units_g, seq_length, batch_size, num_generated_features, reuse=False, parameters=None):
    with tf.compat.v1.variable_scope('generator') as scope:
        W_out_G_initializer = tf.compat.v1.constant_initializer(value=parameters['generator/W_out_G:0'])
        b_out_G_initializer = tf.compat.v1.constant_initializer(value=parameters['generator/b_out_G:0'])
        lstm_initializer = tf.compat.v1.constant_initializer(value=parameters['generator/rnn/lstm_cell/weights:0'])
        bias_start = parameters['generator/rnn/lstm_cell/biases:0']

        W_out_G = tf.compat.v1.get_variable(name='W_out_G', shape=[hidden_units_g, num_generated_features], initializer=W_out_G_initializer)
        b_out_G = tf.compat.v1.get_variable(name='b_out_G', shape=num_generated_features, initializer=b_out_G_initializer)

        inputs = z
        cell = LSTMCell(num_units=hidden_units_g, state_is_tuple=True, initializer=lstm_initializer, bias_start=bias_start, reuse=reuse)
        rnn_outputs, rnn_states = tf.compat.v1.nn.dynamic_rnn(cell=cell,dtype=tf.float32, sequence_length=[seq_length] * batch_size, inputs=inputs)
        rnn_outputs_2d = tf.compat.v1.reshape(rnn_outputs, [-1, hidden_units_g])
        logits_2d = tf.compat.v1.matmul(rnn_outputs_2d, W_out_G) + b_out_G
        output_3d = tf.compat.v1.reshape(logits_2d, [-1, seq_length, num_generated_features])
        output_2d = tf.compat.v1.nn.tanh(logits_2d)
        output_3d_l = tf.compat.v1.reshape(output_2d, [-1, seq_length, num_generated_features])
    return output_3d, output_3d_l
Example #9
def discriminator(x,
                  hidden_units_d,
                  seq_length,
                  batch_size,
                  reuse=False,
                  parameters=None,
                  batch_mean=False):
    with tf.variable_scope("discriminator") as scope:
        if reuse:
            scope.reuse_variables()
        if parameters is None:
            W_out_D_initializer = tf.truncated_normal_initializer()
            b_out_D_initializer = tf.truncated_normal_initializer()
            lstm_initializer = None
            bias_start = 1
        else:
            W_out_D_initializer = tf.constant_initializer(
                value=parameters['discriminator/W_out_D:0'])
            b_out_D_initializer = tf.constant_initializer(
                value=parameters['discriminator/b_out_D:0'])
            lstm_initializer = tf.constant_initializer(
                value=parameters['discriminator/rnn/lstm_cell/weights:0'])
            bias_start = parameters['discriminator/rnn/lstm_cell/biases:0']

        W_out_D = tf.get_variable(name='W_out_D',
                                  shape=[hidden_units_d, 1],
                                  initializer=W_out_D_initializer)
        b_out_D = tf.get_variable(name='b_out_D',
                                  shape=1,
                                  initializer=b_out_D_initializer)
        '''
        if parameters is None:
            W_out_D = tf.get_variable(name='W_out_D', shape=[hidden_units_d, 1],
                                      initializer=tf.truncated_normal_initializer())
            b_out_D = tf.get_variable(name='b_out_D', shape=1,
                                      initializer=tf.truncated_normal_initializer())

        else:
            W_out_D = tf.constant_initializer(value=parameters['discriminator/W_out_D:0'])
            b_out_D = tf.constant_initializer(value=parameters['discriminator/b_out_D:0'])
        '''
        # inputs
        inputs = x

        # append the batch mean of the inputs to each sample (a guard against mode collapse)
        if batch_mean:
            mean_over_batch = tf.stack([tf.reduce_mean(x, axis=0)] *
                                       batch_size,
                                       axis=0)
            inputs = tf.concat([x, mean_over_batch], axis=2)

        # cell = tf.contrib.rnn.LSTMCell(num_units=hidden_units_d,state_is_tuple=True, reuse=reuse)
        cell = LSTMCell(num_units=hidden_units_d,
                        state_is_tuple=True,
                        initializer=lstm_initializer,
                        bias_start=bias_start,
                        reuse=reuse)
        rnn_outputs, rnn_states = tf.nn.dynamic_rnn(cell=cell,
                                                    dtype=tf.float32,
                                                    inputs=inputs)
        # logit_final = tf.matmul(rnn_outputs[:, -1], W_final_D) + b_final_D
        logits = tf.einsum('ijk,km', rnn_outputs,
                           W_out_D) + b_out_D  # per-timestep weighted sum of the RNN outputs
        # logits are pre-activation scores in (-inf, inf); the sigmoid below maps them to (0, 1)

        output = tf.nn.sigmoid(
            logits
        )  # per-timestep probability that the input sequence is real

    return output, logits
Example #10
def generator(z,
              hidden_units_g,
              seq_length,
              batch_size,
              num_generated_features,
              latent_dim,
              reuse=False,
              parameters=None,
              cond_dim=0,
              c=None,
              learn_scale=True):
    """
    If parameters are supplied, initialise as such
    """
    with tf.variable_scope("generator") as scope:
        if reuse:
            scope.reuse_variables()

        ##
        if parameters is None:
            W_out_G_initializer = tf.truncated_normal_initializer()
            b_out_G_initializer = tf.truncated_normal_initializer()
            scale_out_G_initializer = tf.constant_initializer(value=1.0)
            lstm_initializer = None
            bias_start = 1.0
        else:
            W_out_G_initializer = tf.constant_initializer(
                value=parameters['generator/W_out_G:0'])
            b_out_G_initializer = tf.constant_initializer(
                value=parameters['generator/b_out_G:0'])
            try:
                scale_out_G_initializer = tf.constant_initializer(
                    value=parameters['generator/scale_out_G:0'])
            except KeyError:
                scale_out_G_initializer = tf.constant_initializer(value=1)
                assert learn_scale
            lstm_initializer = tf.constant_initializer(
                value=parameters['generator/rnn/lstm_cell/weights:0'])
            bias_start = parameters['generator/rnn/lstm_cell/biases:0']

        W_out_G = tf.get_variable(name='W_out_G',
                                  shape=[hidden_units_g, 5],
                                  initializer=W_out_G_initializer)
        b_out_G = tf.get_variable(name='b_out_G',
                                  shape=5,
                                  initializer=b_out_G_initializer)
        scale_out_G = tf.get_variable(name='scale_out_G',
                                      shape=1,
                                      initializer=scale_out_G_initializer,
                                      trainable=learn_scale)
        if cond_dim > 0:
            # CGAN!
            assert c is not None
            repeated_encoding = tf.stack([c] * seq_length, axis=1)
            inputs = tf.concat([z, repeated_encoding], axis=2)

            #repeated_encoding = tf.tile(c, [1, tf.shape(z)[1]])
            #repeated_encoding = tf.reshape(repeated_encoding, [tf.shape(z)[0], tf.shape(z)[1], cond_dim])
            #inputs = tf.concat([repeated_encoding, z], 2)
        else:
            # inputs = z
            inputs = tf.reshape(z, [-1, 60, 75, 1])

        keep_prob = 0.9

        ### First convolutional layer
        conv_1 = conv2d_layer(inputs, [5, 5, 1, 32], [32], 1, [1, 3, 3, 1],
                              [1, 1, 1, 1], [1, 3, 3, 1], 'SAME')
        # drop1 = tf.nn.dropout(inputs, keep_prob)
        print("Generator conv1 output shape: {}".format(conv_1.shape))

        ### Second convolutional layer
        conv_2 = conv2d_layer(conv_1, [4, 4, 32, 64], [64], 2, [1, 2, 2, 1],
                              [1, 1, 1, 1], [1, 2, 2, 1], 'SAME')
        # drop2 = tf.nn.dropout(conv_2, keep_prob)
        print("Generator conv2 output shape: {}".format(conv_2.shape))

        ### Third convolutional layer
        conv_3 = conv2d_layer(conv_2, [2, 2, 64, 64], [64], 3, [1, 2, 2, 1],
                              [1, 1, 1, 1], [1, 2, 2, 1], 'SAME')
        # drop3 = tf.nn.dropout(conv_3, keep_prob)
        print("Generator conv3 output shape: {}".format(conv_3.shape))

        ### Fourth convolutional layer
        conv_4 = conv2d_layer(conv_3, [2, 2, 64, 128], [1], 4, [1, 2, 2, 1],
                              [1, 1, 1, 1], [1, 2, 2, 1], 'SAME')
        # drop4 = tf.nn.dropout(conv_4, keep_prob)
        print("Generator conv4 output shape: {}".format(conv_4.shape))

        ### Fifth convolutional layer
        conv_5 = conv2d_layer(conv_4, [2, 2, 128, 1], [1], 5, [1, 2, 2, 1],
                              [1, 1, 1, 1], [1, 2, 2, 1], 'SAME')
        print("Generator conv5 output shape: {}".format(conv_5.shape))

        cell = LSTMCell(num_units=hidden_units_g,
                        state_is_tuple=True,
                        initializer=lstm_initializer,
                        bias_start=bias_start,
                        reuse=reuse)

        rnn_outputs, rnn_states = tf.nn.dynamic_rnn(
            cell=cell,
            dtype=tf.float32,
            sequence_length=[seq_length - latent_dim] * batch_size,
            inputs=tf.reshape(conv_5, [-1, 2, 2]))

        rnn_outputs_2d = tf.reshape(rnn_outputs, [-1, hidden_units_g])
        logits_2d = tf.matmul(rnn_outputs_2d, W_out_G) + b_out_G
        #        output_2d = tf.multiply(tf.nn.tanh(logits_2d), scale_out_G)
        output_2d = tf.nn.tanh(logits_2d)
        output_3d = tf.reshape(output_2d,
                               [-1, 2, 5, 1])  #num_generated_features])

        ## deconv1
        deconv1 = tf.nn.conv2d_transpose(
            output_3d,
            tf.get_variable(
                'dw1',
                shape=[4, 4, 64, 1],
                initializer=tf.contrib.layers.xavier_initializer()),
            strides=[1, 3, 3, 1],
            output_shape=[28, 5, 13, 64],
            padding='SAME',
            name='deconv1')
        de_relu1 = tf.nn.relu(deconv1, 'de_relu1')

        ## deconv2
        deconv2 = tf.nn.conv2d_transpose(
            de_relu1,
            tf.get_variable(
                'dw2',
                shape=[5, 5, 32, 64],
                initializer=tf.contrib.layers.xavier_initializer()),
            strides=[1, 2, 2, 1],
            output_shape=[28, 10, 25, 32],
            padding='SAME',
            name='deconv2')
        de_relu2 = tf.nn.relu(deconv2, 'de_relu2')

        ## deconv3
        deconv3 = tf.nn.conv2d_transpose(
            de_relu2,
            tf.get_variable(
                'dw3',
                shape=[5, 5, 1, 32],
                initializer=tf.contrib.layers.xavier_initializer()),
            strides=[1, 2, 3, 1],
            output_shape=[28, 20, 75, 1],
            padding='SAME',
            name='deconv3')
        de_relu3 = tf.nn.relu(deconv3, 'de_relu3')

        print(latent_dim)
        print("Final deconv shape: {}".format(de_relu3.shape))

        fin_output = tf.reshape(de_relu3,
                                [-1, latent_dim, num_generated_features])
        print("Generator final shape: {}".format(fin_output.shape))

        # print(output_2d.shape)
    #     output_3d = tf.reshape(output_2d, [-1, 20, num_generated_features])
    # #     print(output_3d.shape)
    # print('-----------------------------------------------------------------------------------------------------------------------')
    #return output_3d
    return fin_output
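Example #10 also relies on a conv2d_layer helper that is not shown. Judging by the call sites, the positional arguments appear to be input, kernel shape, bias shape, layer index, pooling window, conv strides, pool strides, and padding, but that ordering, and the conv + ReLU + max-pool structure, are assumptions. A minimal sketch consistent with those calls:

# Hedged sketch of the missing conv2d_layer helper used in Example #10.
# Argument order and the ReLU + max-pool structure are inferred from the call sites (assumptions).
import tensorflow as tf

def conv2d_layer(x, kernel_shape, bias_shape, idx, pool_ksize, conv_strides, pool_strides, padding):
    with tf.compat.v1.variable_scope('conv{}'.format(idx)):
        kernel = tf.compat.v1.get_variable('kernel', shape=kernel_shape,
                                           initializer=tf.compat.v1.glorot_uniform_initializer())
        bias = tf.compat.v1.get_variable('bias', shape=bias_shape,
                                         initializer=tf.compat.v1.zeros_initializer())
        conv = tf.nn.conv2d(x, kernel, strides=conv_strides, padding=padding)
        activated = tf.nn.relu(conv + bias)
        return tf.nn.max_pool2d(activated, ksize=pool_ksize, strides=pool_strides, padding=padding)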