def test(self):
    """Evaluate the current model on the whole test split.

    Restores the latest checkpoint, iterates the test pipeline exactly
    once, writes per-sample info (path, label, rescaled prediction) to
    ``<output_dir>/test/<global_step>.txt``, logs the mean loss / MAE /
    RMSE, and pushes those means to the summary writer.

    Returns:
        float: mean MAE over the test set.
    """
    # save current context
    self._enter_('test')
    # create a folder to save results
    test_dir = filesystem.mkdir(self.config.output_dir + '/test/')
    # get data pipeline
    data, label, path = loads(self.config)
    # get network outputs and metrics
    logit, net = self._net(data)
    loss, mae, rmse = self._loss(logit, label)
    # prepare iteration bookkeeping
    total_num = self.data.total_num
    batchsize = self.data.batchsize
    num_iter = int(total_num / batchsize)
    # per-sample text lines: path, ground-truth label, rescaled prediction
    # NOTE(review): assumes self.data.range maps normalized logits back to
    # label scale — confirm against the data loader.
    info = string.concat(batchsize, [path, label, logit * self.data.range])
    mean_loss, mean_mae, mean_rmse = 0, 0, 0
    # get saver
    saver = tf.train.Saver()
    with tf.Session() as sess:
        # restore latest checkpoint; returns its global step
        global_step = self.snapshot.restore(sess, saver)
        with open(test_dir + '%s.txt' % global_step, 'wb') as fw:
            with context.QueueContext(sess):
                for _ in range(num_iter):
                    # running session to acquire values
                    _loss, _mae, _rmse, _info = sess.run(
                        [loss, mae, rmse, info])
                    mean_loss += _loss
                    mean_mae += _mae
                    mean_rmse += _rmse
                    # save tensor info to text file (one sample per line)
                    for _line in _info:
                        fw.write(_line + b'\r\n')
    # statistic: average the accumulated sums over all batches
    mean_loss = 1.0 * mean_loss / num_iter
    mean_mae = 1.0 * mean_mae / num_iter
    mean_rmse = 1.0 * mean_rmse / num_iter
    # display results on screen
    keys = ['total sample', 'num batch', 'loss', 'mae', 'rmse']
    vals = [total_num, num_iter, mean_loss, mean_mae, mean_rmse]
    logger.test(logger.iters(int(global_step), keys, vals))
    # write to summary
    self.summary.adds(global_step=global_step,
                      tags=['test/loss', 'test/mae', 'test/rmse'],
                      values=[mean_loss, mean_mae, mean_rmse])
    self._exit_()
    return mean_mae
def test(self):
    """Evaluate the current model on the whole test split.

    Restores the latest checkpoint, iterates the test pipeline exactly
    once, writes per-sample info (path, label, prediction) to
    ``<output_dir>/test/<global_step>.txt``, logs the mean loss and
    error, and pushes those means to the summary writer.

    Returns:
        float: mean error over the test set.
    """
    # save current context
    self._enter_('test')
    # create a folder to save results
    test_dir = filesystem.mkdir(self.config.output_dir + '/test/')
    # get data pipeline
    data, label, path = loads(self.config)
    # total_num / batch bookkeeping
    total_num = self.data.total_num
    batchsize = self.data.batchsize
    # network outputs and metrics
    logit, net = self._net(data)
    loss, error, pred = self._loss(logit, label)
    # per-sample text lines: path, ground-truth label, prediction
    info = string.concat(batchsize, [path, label, pred])
    num_iter = int(total_num / batchsize)
    mean_err, mean_loss = 0, 0
    # get saver
    saver = tf.train.Saver()
    with context.DefaultSession() as sess:
        # restore latest checkpoint; returns its global step
        global_step = self.snapshot.restore(sess, saver)
        with open(test_dir + '%s.txt' % global_step, 'wb') as fw:
            with context.QueueContext(sess):
                for _ in range(num_iter):
                    _loss, _err, _info = sess.run([loss, error, info])
                    mean_loss += _loss
                    mean_err += _err
                    # save tensor info to text file (one sample per line)
                    for _line in _info:
                        fw.write(_line + b'\r\n')
    # statistic: average the accumulated sums over all batches
    mean_loss = 1.0 * mean_loss / num_iter
    mean_err = 1.0 * mean_err / num_iter
    # display results on screen
    keys = ['total sample', 'num batch', 'loss', 'error']
    vals = [total_num, num_iter, mean_loss, mean_err]
    logger.test(logger.iters(int(global_step), keys, vals))
    # write to summary
    self.summary.adds(global_step=global_step,
                      tags=['test/error', 'test/loss'],
                      values=[mean_err, mean_loss])
    self._exit_()
    return mean_err
def test(self):
    """Evaluate with a threshold acquired from the validation set first.

    Runs validation in a fresh graph to populate the cached embeddings
    (``self.val_*``), then runs the test split in a second fresh graph
    (``self.test_*``). The similarity helper picks the best threshold on
    validation and applies it to the test set; results are logged.

    NOTE(review): assumes ``self._val_or_test`` fills the ``val_*`` /
    ``test_*`` attributes as a side effect and returns the restored
    global step — confirm against its implementation.
    """
    # phase 1: validation pass in its own graph to derive the threshold
    with tf.Graph().as_default():
        self._enter_('val')
        val_dir = filesystem.mkdir(self.config.output_dir + '/val/')
        self._val_or_test(val_dir)
        self._exit_()
    # phase 2: test pass in a second graph, scored with the val threshold
    with tf.Graph().as_default():
        self._enter_('test')
        test_dir = filesystem.mkdir(self.config.output_dir + '/test/')
        step = self._val_or_test(test_dir)
        val_err, val_thed, test_err = similarity.get_all_result(
            self.val_x, self.val_y, self.val_l,
            self.test_x, self.test_y, self.test_l, False)
        keys = ['val_error', 'thred', 'test_error']
        vals = [val_err, val_thed, test_err]
        # step - 1: report against the step the restored checkpoint was
        # written at
        logger.test(logger.iters(int(step) - 1, keys, vals))
        self._exit_()