import math

import tensorflow as tf  # written against the TF 1.x API (tf.to_int32, tf.to_float, sessions)


def square_error(estimated, target):
    # Summed squared difference between the estimates and the targets.  Relies on
    # a `count` helper defined elsewhere (a sketch is given below).
    with tf.name_scope('evaluation'):
        # Both sanity checks comparing `estimated` against `target` are attached as
        # control dependencies so that they actually run before the error is computed.
        checks = [tf.assert_equal(count(tf.to_int32(target) - tf.to_int32(estimated)), 0.),
                  tf.assert_equal(count(tf.cast(target - estimated, tf.int32)), 0.)]
        with tf.control_dependencies(checks):
            squared_difference = tf.pow(estimated - target, 2, name='squared_difference')
            square_error = tf.reduce_sum(squared_difference, name='summing_square_errors')
            return tf.to_float(square_error)
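# The `count` helper is not shown in this snippet; the minimal sketch below only
# illustrates what it is assumed to do, namely return the number of non-zero
# entries of a tensor as a float (so `count(target)` also gives the batch size
# when the targets are never exactly zero).
def count(tensor):
    return tf.to_float(tf.count_nonzero(tensor))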
def mini_batch_rmse(estimated, target):
    # Root-mean-square error over a single mini-batch.
    with tf.name_scope('evaluation'):
        with tf.control_dependencies(
                [tf.assert_equal(count(tf.to_int32(target) - tf.to_int32(estimated)), 0.)]):
            squared_difference = tf.pow(estimated - target, 2, name='squared_difference')
            square_error = tf.reduce_sum(squared_difference, name='summing_square_errors')
            square_error = tf.to_float(square_error)
            # Divide by the number of targets in the batch to get the mean.
            mse = tf.truediv(square_error, count(target), name='meaning_error')
            return tf.sqrt(mse)
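# Illustrative usage sketch, not from the original code: the placeholder names and
# the toy values below are made up, and the numbers in the last comment assume the
# `count` sketch above.
estimated_ph = tf.placeholder(tf.float32, shape=[None], name='estimated')
target_ph = tf.placeholder(tf.float32, shape=[None], name='target')
batch_square_error = square_error(estimated_ph, target_ph)
batch_rmse = mini_batch_rmse(estimated_ph, target_ph)

with tf.Session() as sess:
    feed = {estimated_ph: [3.2, 4.1, 2.1], target_ph: [3.0, 4.0, 2.0]}
    print(sess.run([batch_square_error, batch_rmse], feed_dict=feed))
    # summed squared error ≈ 0.04 + 0.01 + 0.01 = 0.06, RMSE ≈ sqrt(0.06 / 3) ≈ 0.141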
def rmse(self, sess, square_error_batch, x_sparse, target, data_set, is_train):  # Untested
    # Streaming RMSE over `data_set`: accumulate the summed squared error and the
    # number of examples batch by batch, then take the square root of the mean.
    square_error = 0.0
    num_examples = 0.0
    self.Train_set.reset('rmse')
    data_set.reset('rmse')
    steps_per_epoch = data_set.size // self.batch_size_evaluate
    count_op = count(target)  # build the counting op once, outside the loop
    for step in range(steps_per_epoch):
        feed_dict = self.fill_feed_dict_mini_batch(data_set=data_set,
                                                   x_sparse=x_sparse,
                                                   target=target,
                                                   is_train=is_train,
                                                   batch_size=self.batch_size_evaluate)
        # Fetch the batch size and the batch's summed squared error in one run call.
        batch_count, batch_square_error = sess.run([count_op, square_error_batch],
                                                   feed_dict=feed_dict)
        num_examples += batch_count
        square_error += batch_square_error
    mean_square_error = square_error / num_examples
    rmse = math.sqrt(mean_square_error)
    rmse *= 2  # scaling factor applied to the reported RMSE
    return rmse
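# Quick NumPy check, for illustration only, that accumulating per-batch summed
# squared errors and example counts (as the `rmse` method above does) reproduces
# the one-shot RMSE over all the data.  The toy batches are made up.
import math

import numpy as np

batches = [([3.2, 4.1], [3.0, 4.0]), ([2.1], [2.0])]
sq_err = sum(np.sum((np.array(e) - np.array(t)) ** 2) for e, t in batches)
n_examples = sum(len(t) for _, t in batches)
streamed_rmse = math.sqrt(sq_err / n_examples)

all_e = np.concatenate([e for e, _ in batches])
all_t = np.concatenate([t for _, t in batches])
one_shot_rmse = np.sqrt(np.mean((all_e - all_t) ** 2))
assert np.isclose(streamed_rmse, one_shot_rmse)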