def setup(self):
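        """Build the TF1 graph for a single-output regression model:
        input/label/learning-rate placeholders, the model output, the loss
        (with optional regularization), an Adam training op, and an
        interactive session."""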
        tf.reset_default_graph()
        self.x = tf.placeholder(dtype=tf.float32,
                                shape=[None, self.no_of_features],
                                name="input")
        self.y = tf.placeholder(dtype=tf.float32, shape=[None, 1], name="labels")
        self.lr = tf.placeholder(tf.float32, shape=[])
        self.is_train = tf.placeholder(tf.bool, shape=[])

        # Use the user-supplied model builder if one was given; otherwise
        # fall back to the default architecture from get_model().
        if self.logits is None:
            self.logits = self.get_model(self.x, self.is_train)
        else:
            self.logits = self.logits(self.x, self.is_train)
        with tf.name_scope('Output'):
            self.cross_entropy = ops.get_loss(self.logits, self.y, self.loss_type)
            if self.regularization_type is not None:
                self.cross_entropy = ops.get_regularization(self.cross_entropy, self.regularization_type,
                                                            self.regularization_coefficient)
            self.prediction = self.logits
            tf.summary.scalar("Cross_Entropy", self.cross_entropy)

        with tf.name_scope('Optimizer'):
            if self.optimizer is None:
                # Alternatives: an exponential-decay learning-rate schedule
                # (tf.train.exponential_decay), GradientDescentOptimizer,
                # MomentumOptimizer with use_nesterov=True, or AdadeltaOptimizer.
                self.optimizer = tf.train.AdamOptimizer(self.lr).minimize(self.cross_entropy)
        self.session = tf.InteractiveSession()
        return
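
    # Usage sketch (hedged; `Regressor`, batch_x, and batch_y are
    # hypothetical): a training step feeds all four placeholders built above.
    #
    #   model = Regressor(...)
    #   model.setup()
    #   _, loss = model.session.run(
    #       [model.optimizer, model.cross_entropy],
    #       feed_dict={model.x: batch_x, model.y: batch_y,
    #                  model.lr: 1e-3, model.is_train: True})
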
    def setup(self):
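        """Build the TF1 graph for a sequence classifier: placeholders,
        logits, softmax probabilities, argmax predictions, an accuracy
        metric, the loss (with optional regularization), and an Adam
        training op."""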
        tf.reset_default_graph()
        self.x = tf.placeholder(
            dtype=tf.float32,
            shape=[None, self.sequence_length, self.sequence_dimensions],
            name="input")
        self.y = tf.placeholder(dtype=tf.float32,
                                shape=[None, self.no_of_classes],
                                name="labels")
        self.lr = tf.placeholder(tf.float32, shape=[])
        self.is_train = tf.placeholder(tf.bool, shape=[])

        # Use the user-supplied model builder if one was given; otherwise
        # fall back to the default architecture from get_model().
        if self.logits is None:
            self.logits = self.get_model(self.x, self.is_train)
        else:
            self.logits = self.logits(self.x, self.is_train)
        with tf.name_scope('Output'):
            self.cross_entropy = ops.get_loss(self.logits, self.y,
                                              self.loss_type)
            if self.regularization_type is not None:
                self.cross_entropy = ops.get_regularization(
                    self.cross_entropy, self.regularization_type,
                    self.regularization_coefficient)
            self.probability = tf.nn.softmax(self.logits, name="softmax")
            self.prediction = tf.argmax(self.probability, 1, name='Prediction')
            correct_prediction = tf.equal(self.prediction,
                                          tf.argmax(self.y, 1),
                                          name='Correct_prediction')
            self.accuracy = tf.reduce_mean(tf.cast(correct_prediction,
                                                   tf.float32),
                                           name='Accuracy')
            tf.summary.scalar("Cross_Entropy", self.cross_entropy)
            tf.summary.scalar("Accuracy", self.accuracy)

        with tf.name_scope('Optimizer'):
            if self.optimizer is None:
                # Alternatives: an exponential-decay learning-rate schedule
                # (tf.train.exponential_decay), GradientDescentOptimizer,
                # MomentumOptimizer with use_nesterov=True, or AdadeltaOptimizer.
                self.optimizer = tf.train.AdamOptimizer(self.lr).minimize(
                    self.cross_entropy)
        self.session = tf.InteractiveSession()
        return
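
    # Usage sketch (hedged; `Classifier` and the batches are hypothetical):
    # evaluation runs the accuracy node with is_train=False; lr only needs
    # to be fed when the optimizer op is run.
    #
    #   model = Classifier(...)
    #   model.setup()
    #   acc = model.session.run(
    #       model.accuracy,
    #       feed_dict={model.x: batch_x, model.y: batch_y_onehot,
    #                  model.is_train: False})
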
    def setup(self):
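        """Build the TF1 graph for an autoencoder: the input placeholder,
        encoder/decoder ops, a reconstruction loss against the input (with
        optional regularization), a cosine-similarity metric, and an Adam
        training op."""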
        tf.reset_default_graph()
        self.x = tf.placeholder(dtype=tf.float32,
                                shape=[None, self.no_of_features],
                                name="input")
        self.lr = tf.placeholder(tf.float32, shape=[])
        self.is_train = tf.placeholder(tf.bool, shape=[])

        # Use the user-supplied encoder/decoder builders if given; otherwise
        # fall back to the defaults. The decoder builder receives the input
        # placeholder and is expected to produce the reconstruction of x,
        # which the loss below compares against the input.
        if self.encoder_op is None:
            self.encoder_op = self.get_encoder(self.x, self.is_train)
        else:
            self.encoder_op = self.encoder_op(self.x, self.is_train)

        if self.decoder_op is None:
            self.decoder_op = self.get_decoder(self.x, self.is_train)
        else:
            self.decoder_op = self.decoder_op(self.x, self.is_train)

        with tf.name_scope('Output'):
            self.cross_entropy = ops.get_loss(self.decoder_op, self.x, self.loss_type)
            if self.regularization_type is not None:
                self.cross_entropy = ops.get_regularization(self.cross_entropy, self.regularization_type,
                                                            self.regularization_coefficient)
            self.cosine_similarity = distance_metric.cosine_similarity(self.decoder_op, self.x)
            print("cosine_similarity shape:", self.cosine_similarity.get_shape().as_list())

            tf.summary.scalar("Cross_Entropy", self.cross_entropy)
            tf.summary.scalar("Accuracy", tf.reduce_mean(self.cosine_similarity))

        with tf.name_scope('Optimizer'):
            if self.optimizer is None:
                # Alternatives: an exponential-decay learning-rate schedule
                # (tf.train.exponential_decay), GradientDescentOptimizer,
                # MomentumOptimizer with use_nesterov=True, or AdadeltaOptimizer.
                self.optimizer = tf.train.AdamOptimizer(self.lr).minimize(self.cross_entropy)
        self.session = tf.InteractiveSession()
        return
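
    # Usage sketch (hedged; `Autoencoder` and batch_x are hypothetical):
    # the model reconstructs its own input, so no labels are fed.
    #
    #   model = Autoencoder(...)
    #   model.setup()
    #   _, loss, sim = model.session.run(
    #       [model.optimizer, model.cross_entropy, model.cosine_similarity],
    #       feed_dict={model.x: batch_x, model.lr: 1e-3,
    #                  model.is_train: True})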