Example no. 1
 def run_eval(self, sess, data, summary_writer=None, step=0):
     # Requires `numpy as np`, `tensorflow as tf` and the project's
     # `evaluate` / `patk` metric helpers at module level.
     loss_, metrics, p_k = 0.0, None, None
     accuracy, loss = 0.0, 0.0
     merged_summary = self.summarizer.merge_all()
     # Build the sigmoid op once instead of adding a new op on every batch.
     y_pred_sigmoid = tf.nn.sigmoid(self.y_pred)
     i = 0
     for X, Y, tot in self.data.next_batch(data):
         # keep_prob = 1 disables dropout during evaluation.
         feed_dict = {self.x: X, self.y: Y,
                      self.z: np.random.random((Y.shape[0], self.config.solver.randomvar_dim)),
                      self.keep_prob: 1}
         if i == tot - 1 and summary_writer is not None:
             # Last batch: also run the merged summaries and log them.
             if data == "validation":
                 summ, loss_ = sess.run([merged_summary, self.loss], feed_dict=feed_dict)
             else:
                 summ, loss_, accuracy_val = sess.run(
                     [merged_summary, self.loss, self.accuracy], feed_dict=feed_dict)
             summary_writer.add_summary(summ, step)
         else:
             if data == "validation":
                 loss_, Y_pred = sess.run([self.loss, y_pred_sigmoid], feed_dict=feed_dict)
                 p_k = patk(predictions=Y_pred, labels=Y)
             else:
                 loss_, Y_pred, accuracy_val = sess.run(
                     [self.loss, y_pred_sigmoid, self.accuracy], feed_dict=feed_dict)
                 metrics = evaluate(predictions=Y_pred, labels=Y)
                 p_k = patk(predictions=Y_pred, labels=Y)
                 accuracy += accuracy_val  # metrics['accuracy']
         loss += loss_
         i += 1
     return loss / i, accuracy / self.config.batch_size, metrics, p_k
 def run_eval(self,
              sess,
              data,
              summary_writer=None,
              step=0,
              type_loss="NORMAL"):
     loss_, metrics, p_k, Y, Y_pred = 0.0, None, None, None, None
     accuracy, loss = 0.0, 0.0
     merged_summary = self.summarizer.merge_all()
     # Build the sigmoid op once instead of adding a new op on every batch.
     y_pred_sigmoid = tf.nn.sigmoid(self.y_pred)
     i = 0
     for X, Y, tot in self.data.next_batch(data):
         # keep_prob = 1 disables dropout during evaluation.
         feed_dict = {self.x: X, self.y: Y, self.keep_prob: 1}
         if type_loss == "AUTO":
             # Autoencoder pre-training: evaluate only the reconstruction loss.
             summ, loss_ = sess.run([merged_summary, self.autoencoder_loss],
                                    feed_dict=feed_dict)
         else:
             if i == tot - 1 and summary_writer is not None:
                 # Last batch: also run the merged summaries and log them.
                 if data == "validation":
                     summ, loss_ = sess.run([merged_summary, self.loss],
                                            feed_dict=feed_dict)
                 else:
                     summ, loss_, accuracy_val = sess.run(
                         [merged_summary, self.loss, self.accuracy],
                         feed_dict=feed_dict)
                 summary_writer.add_summary(summ, step)
             else:
                 if data == "validation":
                     loss_, Y_pred = sess.run([self.loss, y_pred_sigmoid],
                                              feed_dict=feed_dict)
                     p_k = patk(predictions=Y_pred, labels=Y)
                 else:
                     loss_, Y_pred, accuracy_val = sess.run(
                         [self.loss, y_pred_sigmoid, self.accuracy],
                         feed_dict=feed_dict)
                     metrics = evaluate(predictions=Y_pred, labels=Y)
                     accuracy += accuracy_val  # metrics['accuracy']
         loss += loss_
         i += 1
     if data == "test":
         # X, Y = self.data.get_test()
         # Note: Y holds the last batch's labels, while Y_pred may come from an
         # earlier batch if the final batch only ran the summary branch.
         p_k = patk(predictions=Y_pred, labels=Y)
         # sess.run(self.patk, feed_dict={self.x: X, self.y: Y, self.keep_prob: 1})
     return loss / i, accuracy / self.config.batch_size, metrics, p_k
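Both variants of run_eval are meant to be driven from an outer training loop. The snippet below is only a usage sketch under assumed names (model for an instance of the class above, n_epochs for the epoch count, "logs/" for the summary directory); it is not code from the original project.

 # Usage sketch only. `model`, `n_epochs` and the log directory are assumed names.
 import tensorflow as tf

 n_epochs = 10
 with tf.Session() as sess:
     sess.run(tf.global_variables_initializer())
     writer = tf.summary.FileWriter("logs/", graph=sess.graph)
     for epoch in range(n_epochs):
         # ... run the training batches for this epoch ...
         val_loss, val_acc, _, val_pk = model.run_eval(
             sess, "validation", summary_writer=writer, step=epoch)
         print("epoch %d: val_loss=%.4f p@k=%s" % (epoch, val_loss, val_pk))
     test_loss, test_acc, test_metrics, test_pk = model.run_eval(sess, "test")
     writer.close()

Running evaluation inside the same session keeps the trained weights in memory and lets the summary writer interleave validation points with the training curves.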
Example no. 3
 def get_metrics(self, sess, data):
     accuracy, metrics, p_k, i = 0.0, None, None, 0
     # Build the sigmoid op once instead of adding a new op on every batch.
     y_pred_sigmoid = tf.nn.sigmoid(self.y_pred)
     for X, Y, tot in self.data.next_batch(data):
         # keep_prob = 1.0 disables dropout during evaluation.
         feed_dict = {self.x: X, self.y: Y, self.keep_prob: 1.0}
         Y_pred, accuracy_val = sess.run([y_pred_sigmoid, self.accuracy],
                                         feed_dict=feed_dict)
         # metrics and p@k end up reflecting the last batch only;
         # accuracy is averaged over all batches.
         metrics = evaluate(predictions=Y_pred, labels=Y)
         p_k = patk(predictions=Y_pred, labels=Y)
         accuracy += accuracy_val
         i += 1
     return metrics, accuracy / i, p_k
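The evaluate and patk helpers used throughout these examples are not shown. As a rough reference, precision@k over sigmoid scores can be sketched as below; the name patk_sketch, its signature, and the default k are assumptions, not the project's actual implementation.

 # Illustrative precision@k; the project's `patk` helper may differ.
 import numpy as np

 def patk_sketch(predictions, labels, k=5):
     """Mean precision@k: the fraction of each sample's k highest-scored
     labels that are actually positive, averaged over the batch."""
     predictions = np.asarray(predictions)
     labels = np.asarray(labels)
     topk = np.argsort(-predictions, axis=1)[:, :k]    # indices of the k largest scores
     hits = np.take_along_axis(labels, topk, axis=1)   # 1 where a top-k label is positive
     return float(hits.mean())

Called as patk_sketch(Y_pred, Y) on the sigmoid outputs and the binary label matrix, this returns a single float in [0, 1], which matches how the p_k value is used in the evaluation loops above.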