def construct_graph(self, input_seq_pl, dropout):
        """ Contruct a tensofrlow graph for the network

        Args:
          input_seq_pl:     tf placeholder for ae input data [batch_size, sequence_length, DoF]
          dropout:          how much of the input neurons will be activated, value in range [0,1]
        Returns:
          Tensor of output
        """

        network_input = simulate_missing_markets(input_seq_pl, self._mask,
                                                 self.default_value)

        if FLAGS.reccurent is False:
            last_output = network_input[:, 0, :]

            numb_layers = self.num_hidden_layers + 1

            # Pass through the network
            for i in range(numb_layers):
                # First - Apply Dropout
                last_output = tf.nn.dropout(last_output, dropout)

                w = self._w(i + 1)
                b = self._b(i + 1)
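                # _activate presumably applies the affine transform w*x + b
                # followed by the layer's non-linearity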

                last_output = self._activate(last_output, w, b)

            output = tf.reshape(last_output, [
                self.batch_size, 1,
                FLAGS.frame_size * FLAGS.amount_of_frames_as_input
            ])

        else:
            output, last_states = tf.nn.dynamic_rnn(cell=self._RNN_cell,
                                                    dtype=tf.float32,
                                                    inputs=network_input)
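            # `output` holds the RNN output at every time step:
            # [batch_size, sequence_length, output size of self._RNN_cell]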

            # Reuse variables
            # so that we can use the same LSTM both for training and testing
            tf.get_variable_scope().reuse_variables()

        return output

    def construct_tcn_graph(self, input_seq_pl, dropout):
        """ Construct a TensorFlow graph for the TCN-based network

        Args:
          input_seq_pl:     tf placeholder for ae input data [batch_size, sequence_length, DoF]
          dropout:          keep probability for dropout (fraction of input neurons kept active), value in range [0,1]
        Returns:
          Output tensor and the list of TCN parameters
        """

        network_input = simulate_missing_markets(input_seq_pl, self._mask, self.default_value)

        if FLAGS.reccurent is False:
            last_output = network_input[:, 0, :]

            numb_layers = self.num_hidden_layers + 1

            # Pass through the network
            for i in range(numb_layers):
                # First - Apply Dropout
                last_output = tf.nn.dropout(last_output, dropout)

                w = self._w(i + 1)
                b = self._b(i + 1)

                last_output = self._activate(last_output, w, b)

            output = tf.reshape(last_output, [self.batch_size, 1,
                                              FLAGS.frame_size * FLAGS.amount_of_frames_as_input])

            # Assumed fix: this branch originally fell through and returned None;
            # return the output with an empty parameter list to match the
            # recurrent branch below
            return output, []

        else:
            # output, last_states = tf.nn.dynamic_rnn(
            #     cell=self._RNN_cell,
            #     dtype=tf.float32,
            #     inputs=network_input)

            # Stack the chunk_length time steps along the leading axis:
            # [chunk_length, batch_size, frame_size * amount_of_frames_as_input]
            z_prior = tf.convert_to_tensor(
                tf.unstack(network_input, FLAGS.chunk_length, 1))
            print("z_prior=", z_prior.shape)

            with tf.variable_scope("tcn") as tcn:
                #g_trainingState = tf.placeholder(tf.bool)
                #g_res, g_states = tf.contrib.rnn.static_rnn(self._gen_lstm_cell, z_prior, dtype=tf.float32)

                #dilations = process_dilations(FLAGS.dilations)
                #input_layer = Input(shape=(max_len, num_feat))

                # Causal temporal convolutions with dilations 1..32, followed by
                # a dense projection back to 123 features per frame
                t_res = TCN(nb_filters=123,
                            kernel_size=2,
                            nb_stacks=1,
                            dilations=[2 ** i for i in range(6)],
                            padding='causal',
                            use_skip_connections=False,
                            dropout_rate=FLAGS.dropout,
                            return_sequences=True,
                            activation='relu',
                            name='tcn',
                            kernel_initializer='he_normal',
                            use_batch_norm=True)(z_prior)
                t_res = Dense(123)(t_res)
                t_res = Activation('relu')(t_res)
                print('t_res.shape=', t_res.shape)

                # Refine the TCN features with the LSTM cell
                g_res, g_states = tf.nn.dynamic_rnn(self._gen_lstm_cell, t_res,
                                                    dtype=tf.float32)
                t_last_output = g_res

                t_output = t_last_output

                # Collect all variables created inside the "tcn" scope
                t_params = [v for v in tf.global_variables()
                            if v.name.startswith(tcn.name)]

                t_output = tf.reshape(t_output, [
                    self.batch_size, FLAGS.chunk_length,
                    FLAGS.frame_size * FLAGS.amount_of_frames_as_input
                ])

            with tf.name_scope("tcn_params"):
                for param in t_params:
                    variable_summaries(param)

            # Reuse variables
            # so that we can use the same LSTM both for training and testing
            tf.get_variable_scope().reuse_variables()

            return t_output, t_params

    def construct_gen_graph(self, input_seq_pl, dropout):
        """ Contruct a tensorflow graph for the network

        Args:
          input_seq_pl:     tf placeholder for ae input data [batch_size, sequence_length, DoF]
          dropout:          how much of the input neurons will be activated, value in range [0,1]
        Returns:
          Tensor of output
        """

        network_input = simulate_missing_markets(input_seq_pl, self._mask,
                                                 self.default_value)

        if FLAGS.reccurent is False:
            last_output = network_input[:, 0, :]

            numb_layers = self.num_hidden_layers + 1

            # Pass through the network
            for i in range(numb_layers):
                # First - Apply Dropout
                last_output = tf.nn.dropout(last_output, dropout)

                w = self._w(i + 1)
                b = self._b(i + 1)

                last_output = self._activate(last_output, w, b)

            output = tf.reshape(last_output, [
                self.batch_size, 1,
                FLAGS.frame_size * FLAGS.amount_of_frames_as_input
            ])

            # Assumed fix: return here as well, since this branch originally fell
            # through and the method returned None
            return output

        else:
            # output, last_states = tf.nn.dynamic_rnn(
            #     cell=self._RNN_cell,
            #     dtype=tf.float32,
            #     inputs=network_input)

            z_prior = tf.convert_to_tensor(
                tf.unstack(network_input, FLAGS.chunk_length, 1))
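            # As in construct_tcn_graph, the time steps are stacked along the
            # leading axis: [chunk_length, batch_size, frame_size * amount_of_frames_as_input]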

            with tf.variable_scope("gen") as gen:
                g_trainingState = tf.placeholder(tf.bool)
                #g_res, g_states = tf.contrib.rnn.static_rnn(self._gen_lstm_cell, z_prior, dtype=tf.float32)
                g_res, g_states = tf.nn.dynamic_rnn(self._gen_lstm_cell,
                                                    z_prior,
                                                    dtype=tf.float32)
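                # g_res contains the LSTM output at every time step,
                # g_states holds the final cell state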
                #print("g_res:",g_res)
                #print("g_states:",g_states)
                g_last_output = g_res

                #g_w_1 = self._w("g" + str(1))
                #g_b_1 = self._b("g" + str(1))
                #
                #g_w_2 = self._w("g" + str(2))
                #g_b_2 = self._b("g" + str(2))
                #
                #g_hidden_1 = tf.nn.bias_add(tf.matmul(g_last_output, g_w_1),g_b_1)
                #g_bnHidden_1 = tf.layers.batch_normalization(g_hidden_1, training = True)
                #g_last_output_1 = tf.nn.leaky_relu(g_bnHidden_1)
                #
                #g_hidden_2 = tf.nn.bias_add(tf.matmul(g_last_output_1, g_w_2),g_b_2)
                #g_bnHidden_2 = tf.layers.batch_normalization(g_hidden_2, training = True)
                #g_last_output_2 = tf.nn.leaky_relu(g_bnHidden_2)

                for i in range(len(self._gen_shape) - 1):
                    g_last_output = tf.nn.dropout(g_last_output, dropout)
                    g_w = self._w("g" + str(i + 1))
                    g_b = self._b("g" + str(i + 1))
                    g_hidden = tf.nn.bias_add(tf.matmul(g_last_output, g_w),
                                              g_b)
                    # Normalize over all axes of the activation tensor,
                    # without a learned offset or scale
                    g_hidden_mean, g_hidden_variance = tf.nn.moments(
                        g_hidden,
                        axes=list(range(len(g_hidden.shape))),
                        keep_dims=True)
                    g_bnHidden = tf.nn.batch_normalization(
                        g_hidden,
                        mean=g_hidden_mean,
                        variance=g_hidden_variance,
                        variance_epsilon=FLAGS.variance_epsilon,
                        offset=None,
                        scale=None)
                    #g_bnHidden = tf.layers.batch_normalization(g_hidden, training = True)
                    g_last_output = tf.nn.leaky_relu(g_bnHidden)
                    print(g_last_output)

                g_output = tf.nn.tanh(g_last_output)
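                # tanh bounds the generated frames to [-1, 1] (presumably matching
                # the normalization applied to the training data)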

                g_params = [
                    v for v in tf.global_variables()
                    if v.name.startswith(gen.name)
                ]

                g_output = tf.reshape(g_output, [
                    self.batch_size, FLAGS.chunk_length,
                    FLAGS.frame_size * FLAGS.amount_of_frames_as_input
                ])

            with tf.name_scope("gen_params"):
                for param in g_params:
                    variable_summaries(param)

            # Reuse variables
            # so that we can use the same LSTM both for training and testing
            tf.get_variable_scope().reuse_variables()

            return g_output  #, g_params
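
    # Minimal usage sketch (caller names such as `model` and `input_pl` are
    # assumptions; shapes follow the docstrings above):
    #   input_pl = tf.placeholder(
    #       tf.float32,
    #       [model.batch_size, FLAGS.chunk_length,
    #        FLAGS.frame_size * FLAGS.amount_of_frames_as_input])
    #   ff_or_rnn_output = model.construct_graph(input_pl, dropout=FLAGS.dropout)
    #   tcn_output, tcn_params = model.construct_tcn_graph(input_pl, FLAGS.dropout)
    #   gen_output = model.construct_gen_graph(input_pl, FLAGS.dropout)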