def __init__(self, shape, sess, batch_size, variance_coef, data_info):
        """Autoencoder initializer

        Args:
          shape:          list of ints specifying
                          num input, hidden1 units,...hidden_n units, num outputs
          sess:           tensorflow session object to use
          batch_size:     batch size
          varience_coef:  multiplicative factor for the variance of noise wrt the variance of data
          data_info:      key information about the dataset
        """

        AutoEncoder.__init__(self, len(shape) - 2, batch_size, FLAGS.chunk_length, sess, data_info)
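        # Assumption (defined outside this file): the AutoEncoder base initializer is expected
        # to provide the members used below, e.g. self._train_batch, self._valid_batch,
        # self.batch_size and self.max_val, as well as the dataset input pipeline.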

        self.__shape = shape  # [input_dim,hidden1_dim,...,hidden_n_dim,output_dim]

        self._gen_shape = FLAGS.gen_layers

        self._dis_shape = FLAGS.dis_layers

        self.__variables = {}


        with sess.graph.as_default():

            with tf.variable_scope("AE_Variables"):

                ##############        SETUP VARIABLES       #####################################

                # Per-layer variable creation for the generator and discriminator networks
                # (currently disabled):
                # for i in range(len(self._gen_shape) - 1):
                #     # create variables for matrices and biases for each layer
                #     self._gen_create_variables(i, FLAGS.Weight_decay)
                # for i in range(len(self._dis_shape) - 1):
                #     # create variables for matrices and biases for each layer
                #     self._dis_create_variables(i, FLAGS.Weight_decay)


                if FLAGS.reccurent:

                    # Define LSTM cell
                    lstm_sizes = self.__shape[1:]

                    def lstm_cell(size):
                        basic_cell = tf.contrib.rnn.BasicLSTMCell(
                            size, forget_bias=1.0, state_is_tuple=True, reuse=tf.AUTO_REUSE)
                        # Apply dropout on the hidden layers
                        if size != self.__shape[-1]:
                            hidden_cell = tf.contrib.rnn.DropoutWrapper(
                                cell=basic_cell, output_keep_prob=FLAGS.dropout)
                            return hidden_cell
                        else:
                            return basic_cell

                    self._gen_lstm_cell = tf.contrib.rnn.MultiRNNCell(
                        [lstm_cell(sz) for sz in lstm_sizes], state_is_tuple=True)
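                    # The stacked cell above has one LSTM layer per entry of lstm_sizes;
                    # lstm_cell() dropout-wraps every hidden-sized layer and leaves the
                    # output-sized layer unwrapped.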
                    #self._dis_lstm_cell = tf.contrib.rnn.MultiRNNCell(
                    #    [lstm_cell(sz) for sz in lstm_sizes], state_is_tuple=True)
                    

                ##############        DEFINE THE NETWORK     ###################################

                # Declare a mask for simulating missing_values
                self._mask = tf.placeholder(dtype=tf.float32,
                                            shape=[FLAGS.batch_size, FLAGS.chunk_length,
                                                   FLAGS.frame_size *
                                                   FLAGS.amount_of_frames_as_input],
                                            name='Mask_of_mis_markers')
                self._mask_generator = self.binary_random_matrix_generator(FLAGS.missing_rate)
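                # Illustrative sketch (assumption; the real generator is defined in the base
                # class): binary_random_matrix_generator(missing_rate) presumably yields
                # float {0, 1} matrices of the same shape as self._mask, with roughly
                # `missing_rate` of the entries set to 0 to mark markers as missing, e.g.:
                #   while True:
                #       keep = np.random.uniform(size=mask_shape) > missing_rate
                #       yield keep.astype(np.float32)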

                # Reminder: we use Denoising AE
                # (http://www.jmlr.org/papers/volume11/vincent10a/vincent10a.pdf)

                ''' 1 - Setup network for TRAINing '''
                # Input noisy data and reconstruct the original one
                self._input_ = add_noise(self._train_batch, variance_coef, data_info._data_sigma)
                self._target_ = self._train_batch
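                # Assumption about add_noise() (defined elsewhere): it presumably adds zero-mean
                # Gaussian noise whose per-dimension variance is variance_coef * data_sigma**2,
                # so the network learns to reconstruct the clean target from a corrupted input.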

                # Define output and loss for the training data
                # self._output = self.construct_graph(self._input_, FLAGS.dropout)
                # self._output, self._gen_params_ = self.construct_gen_graph(self._input_, FLAGS.dropout)
                self._output, self._tcn_params_ = self.construct_tcn_graph(self._input_, FLAGS.dropout)
                # self._y_data_, self._y_generated_, self._dis_params_ = self.construct_dis_graph(self._input_, self._output, FLAGS.dropout)
                # self._y_data_, self._y_generated_, self._dis_params_ = self.construct_dis_graph(self._target_, self._output, FLAGS.dropout)
                print("self._output =", self._output)
                # print("self._target_ =", self._target_)
                print("self._tcn_params_ =", self._tcn_params_)
                self._reconstruction_loss = loss_reconstruction(self._output, self._target_, self.max_val)
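                # Assumption about loss_reconstruction() (defined elsewhere): it presumably
                # computes a mean squared error between output and target, with self.max_val
                # used to relate the error back to the original data scale.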
                # self._gen_loss = tf.reduce_mean(tf.square(- tf.log(self._y_generated_)))
                # self._dis_loss = tf.reduce_mean(tf.square(- (tf.log(self._y_data_) + tf.log(1 - self._y_generated_))))
                #self._gen_loss = - tf.reduce_mean(self._y_generated_)
                #self._dis_loss = - tf.reduce_mean(self._y_data_) + tf.reduce_mean(self._y_generated_)

                # Gradient Penalty
                #self.epsilon = tf.random_uniform(shape=[self.batch_size, 1, 1, 1], minval=0., maxval=1.)
                #self.epsilon = tf.random_uniform(shape=[self.batch_size, 1, 1], minval=0., maxval=1.)
                #X_hat = self._target_ + self.epsilon * (self._output - self._target_)
                #D_X_hat, _, __ = self.construct_dis_graph(X_hat, self._output, FLAGS.dropout)
                #grad_D_X_hat = tf.gradients(D_X_hat, [X_hat])[0]
                #red_idx = [i for i in range(1, X_hat.shape.ndims)]
                #slopes = tf.sqrt(1e-8 + tf.reduce_sum(tf.square(grad_D_X_hat), reduction_indices=red_idx))
                #gradient_penalty = tf.reduce_mean((slopes - 1.) ** 2)
                #self._dis_loss = self._dis_loss + 10.0 * gradient_penalty
        
                #self.reg = tf.contrib.layers.apply_regularization(
                #    tf.contrib.layers.l1_regularizer(2.5e-5),
                #    weights_list=[var for var in tf.global_variables() if 'weights' in var.name]
                #)
                #self._gen_loss = self._gen_loss + self.reg
                #self._dis_loss = self._dis_loss + self.reg
        
                tf.add_to_collection('losses', self._reconstruction_loss)
                #tf.add_to_collection('losses', self._gen_loss)
                #tf.add_to_collection('losses', self._dis_loss)
                self._loss = tf.add_n(tf.get_collection('losses'), name='total_loss')
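                # tf.add_n over the 'losses' collection sums the reconstruction loss with any
                # regularization terms (e.g. weight decay) that other parts of the model may
                # have registered in the same collection.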

                ''' 2 - Setup network for TESTing '''
                self._valid_input_ = self._valid_batch
                self._valid_target_ = self._valid_batch

                # Define output
                #self._valid_output = self.construct_graph(self._valid_input_, 1)
                self._valid_output, self._valid_tcn_params_ = self.construct_tcn_graph(self._valid_input_, 1)
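                # A keep probability of 1 disables dropout, so validation runs the
                # deterministic network.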

                # Define loss
                self._valid_loss = loss_reconstruction(self._valid_output, self._valid_target_,
                                                       self.max_val)
    def __init__(self, shape, sess, batch_size, variance_coef, data_info):
        """Autoencoder initializer

        Args:
          shape:          list of ints specifying
                          num input, hidden1 units,...hidden_n units, num outputs
          sess:           tensorflow session object to use
          batch_size:     batch size
          varience_coef:  multiplicative factor for the variance of noise wrt the variance of data
          data_info:      key information about the dataset
        """

        AutoEncoder.__init__(self,
                             len(shape) - 2, batch_size, FLAGS.chunk_length,
                             sess, data_info)

        self.__shape = shape  # [input_dim,hidden1_dim,...,hidden_n_dim,output_dim]

        self.__variables = {}

        with sess.graph.as_default():

            with tf.variable_scope("AE_Variables"):

                ##############        SETUP VARIABLES       #####################################

                for i in range(self.num_hidden_layers + 1):  # go over all layers

                    # create variables for matrices and biases for each layer
                    self._create_variables(i, FLAGS.Weight_decay)
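                    # Assumption about _create_variables() (defined in the base class): it
                    # presumably creates the weight matrix and bias for layer i and, when
                    # FLAGS.Weight_decay is set, adds an L2 penalty to the 'losses'
                    # collection that the tf.add_n call below picks up.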

                if FLAGS.reccurent:

                    # Define LSTM cell
                    lstm_sizes = self.__shape[1:]

                    def lstm_cell(size):
                        basic_cell = tf.contrib.rnn.BasicLSTMCell(
                            size, forget_bias=1.0, state_is_tuple=True)
                        # Apply dropout on the hidden layers
                        if size != self.__shape[-1]:
                            hidden_cell = tf.contrib.rnn.DropoutWrapper(
                                cell=basic_cell, output_keep_prob=FLAGS.dropout)
                            return hidden_cell
                        else:
                            return basic_cell

                    self._RNN_cell = tf.contrib.rnn.MultiRNNCell(
                        [lstm_cell(sz) for sz in lstm_sizes],
                        state_is_tuple=True)

                ##############        DEFINE THE NETWORK     ###################################

                # Declare a mask for simulating missing_values
                self._mask = tf.placeholder(
                    dtype=tf.float32,
                    shape=[
                        FLAGS.batch_size, FLAGS.chunk_length,
                        FLAGS.frame_size * FLAGS.amount_of_frames_as_input
                    ],
                    name='Mask_of_mis_markers')
                self._mask_generator = self.binary_random_matrix_generator(
                    FLAGS.missing_rate)

                # Reminder: we use Denoising AE
                # (http://www.jmlr.org/papers/volume11/vincent10a/vincent10a.pdf)
                ''' 1 - Setup network for TRAINing '''
                # Input noisy data and reconstruct the original one
                self._input_ = add_noise(self._train_batch, variance_coef,
                                         data_info._data_sigma)
                self._target_ = self._train_batch

                # Define output and loss for the training data
                self._output = self.construct_graph(self._input_,
                                                    FLAGS.dropout)
                self._reconstruction_loss = loss_reconstruction(
                    self._output, self._target_, self.max_val)
                tf.add_to_collection('losses', self._reconstruction_loss)
                self._loss = tf.add_n(tf.get_collection('losses'),
                                      name='total_loss')
                ''' 2 - Setup network for TESTing '''
                self._valid_input_ = self._valid_batch
                self._valid_target_ = self._valid_batch

                # Define output
                self._valid_output = self.construct_graph(
                    self._valid_input_, 1)

                # Define loss
                self._valid_loss = loss_reconstruction(self._valid_output,
                                                       self._valid_target_,
                                                       self.max_val)
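        # Illustrative usage (assumption; the concrete class name and the flag names below are
        # hypothetical placeholders, the real ones come from the training script):
        #   sess = tf.Session()
        #   input_dim = FLAGS.frame_size * FLAGS.amount_of_frames_as_input
        #   shape = [input_dim, 512, 256, 512, input_dim]
        #   model = Autoencoder(shape, sess, FLAGS.batch_size, FLAGS.variance_of_noise, data_info)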