Example 1
    def test(self):
        """
    """
        # save current context
        self._enter_('test')

        # create a folder to save the results
        test_dir = filesystem.mkdir(self.config.output_dir + '/test/')

        # get data pipeline
        data, label, path = loads(self.config)

        # get network
        logit, net = self._net(data)
        loss, mae, rmse = self._loss(logit, label)

        # prepare
        total_num = self.data.total_num
        batchsize = self.data.batchsize
        num_iter = int(total_num / batchsize)
        info = string.concat(batchsize, [path, label, logit * self.data.range])
        mean_loss, mean_mae, mean_rmse = 0, 0, 0

        # get saver
        saver = tf.train.Saver()
        with tf.Session() as sess:
            # get latest checkpoint
            global_step = self.snapshot.restore(sess, saver)
            with open(test_dir + '%s.txt' % global_step, 'wb') as fw:
                with context.QueueContext(sess):
                    for _ in range(num_iter):
                        # run the session to acquire values
                        _loss, _mae, _rmse, _info = sess.run(
                            [loss, mae, rmse, info])
                        mean_loss += _loss
                        mean_mae += _mae
                        mean_rmse += _rmse
                        # save tensor info to text file
                        for _line in _info:
                            fw.write(_line + b'\r\n')

                    # statistic
                    mean_loss = 1.0 * mean_loss / num_iter
                    mean_mae = 1.0 * mean_mae / num_iter
                    mean_rmse = 1.0 * mean_rmse / num_iter

            # display results on screen
            keys = ['total sample', 'num batch', 'loss', 'mae', 'rmse']
            vals = [total_num, num_iter, mean_loss, mean_mae, mean_rmse]
            logger.test(logger.iters(int(global_step), keys, vals))

            # write to summary
            self.summary.adds(global_step=global_step,
                              tags=['test/loss', 'test/mae', 'test/rmse'],
                              values=[mean_loss, mean_mae, mean_rmse])

            self._exit_()
            return mean_mae
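
`context.QueueContext` is project-specific code. A minimal sketch of what such a context manager typically wraps in a TF 1.x queue-based input pipeline (starting the queue runners on entry, stopping and joining them on exit); the class body below is an assumption, only the `tf.train` calls are standard API:

    import tensorflow as tf

    class QueueContext(object):
        """Start input queue runners on entry; stop and join them on exit."""

        def __init__(self, sess):
            self.sess = sess
            self.coord = tf.train.Coordinator()
            self.threads = None

        def __enter__(self):
            self.threads = tf.train.start_queue_runners(
                sess=self.sess, coord=self.coord)
            return self

        def __exit__(self, exc_type, exc_value, traceback):
            self.coord.request_stop()
            self.coord.join(self.threads)
            return False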
Example 2
    def test(self):
        """
    """
        # save current context
        self._enter_('test')

        # create a folder to save the results
        test_dir = filesystem.mkdir(self.config.output_dir + '/test/')

        # get data pipeline
        data, label, path = loads(self.config)

        # total_num
        total_num = self.data.total_num
        batchsize = self.data.batchsize

        # network
        logit, net = self._net(data)
        loss, error, pred = self._loss(logit, label)

        # prepare
        info = string.concat(batchsize, [path, label, pred])
        num_iter = int(total_num / batchsize)
        mean_err, mean_loss = 0, 0

        # get saver
        saver = tf.train.Saver()
        with context.DefaultSession() as sess:
            global_step = self.snapshot.restore(sess, saver)
            with open(test_dir + '%s.txt' % global_step, 'wb') as fw:
                with context.QueueContext(sess):
                    for _ in range(num_iter):
                        _loss, _err, _info = sess.run([loss, error, info])
                        mean_loss += _loss
                        mean_err += _err
                        for _line in _info:
                            fw.write(_line + b'\r\n')

                    # statistic
                    mean_loss = 1.0 * mean_loss / num_iter
                    mean_err = 1.0 * mean_err / num_iter

            # display results on screen
            keys = ['total sample', 'num batch', 'loss', 'error']
            vals = [total_num, num_iter, mean_loss, mean_err]
            logger.test(logger.iters(int(global_step), keys, vals))

            # write to summary
            self.summary.adds(global_step=global_step,
                              tags=['test/error', 'test/loss'],
                              values=[mean_err, mean_loss])

            self._exit_()
            return mean_err
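
Both examples restore weights through `self.snapshot.restore`, which is project code. A plausible minimal version, assuming it loads the newest checkpoint from a directory and reads the global step out of the checkpoint filename; the helper name and directory argument are hypothetical:

    import tensorflow as tf

    def restore_latest(sess, saver, chkp_dir):
        # hypothetical helper: restore the newest checkpoint in chkp_dir
        ckpt = tf.train.latest_checkpoint(chkp_dir)
        if ckpt is None:
            return '0'
        saver.restore(sess, ckpt)
        # checkpoint paths conventionally end with '-<global_step>';
        # returned as a string, since the callers above apply int() themselves
        return ckpt.split('-')[-1]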
Example 3
    def test(self):
        """ we need acquire threshold from validation first """
        with tf.Graph().as_default():
            self._enter_('val')
            val_dir = filesystem.mkdir(self.config.output_dir + '/val/')
            self._val_or_test(val_dir)
            self._exit_()

        with tf.Graph().as_default():
            self._enter_('test')
            test_dir = filesystem.mkdir(self.config.output_dir + '/test/')
            step = self._val_or_test(test_dir)
            val_err, val_thed, test_err = similarity.get_all_result(
                self.val_x, self.val_y, self.val_l, self.test_x, self.test_y,
                self.test_l, False)
            keys = ['val_error', 'thred', 'test_error']
            vals = [val_err, val_thed, test_err]
            logger.test(logger.iters(int(step) - 1, keys, vals))
            self._exit_()
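
`similarity.get_all_result` is project-specific. A minimal sketch of the underlying idea for a verification-style task, assuming paired embeddings (`*_x`, `*_y`) with binary same/different labels (`*_l`): sweep a distance threshold over the validation pairs, keep the one with the lowest error, and apply it unchanged to the test pairs. All names below are assumptions:

    import numpy as np

    def get_all_result(val_x, val_y, val_l, test_x, test_y, test_l):
        def cosine_dist(a, b):
            a = a / np.linalg.norm(a, axis=1, keepdims=True)
            b = b / np.linalg.norm(b, axis=1, keepdims=True)
            return 1.0 - np.sum(a * b, axis=1)

        def error_at(dist, labels, thr):
            # predict "same pair" when the distance falls below the threshold
            pred = (dist < thr).astype(labels.dtype)
            return float(np.mean(pred != labels))

        val_d = cosine_dist(val_x, val_y)
        test_d = cosine_dist(test_x, test_y)
        # sweep candidate thresholds over the range of validation distances
        candidates = np.linspace(val_d.min(), val_d.max(), 1000)
        errors = [error_at(val_d, val_l, t) for t in candidates]
        best = int(np.argmin(errors))
        val_thed = float(candidates[best])
        return errors[best], val_thed, error_at(test_d, test_l, val_thed)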
Example 4
  def after_run(self, run_context, run_values):
    cur_iter = run_values.results[0] - 1
    self.mean_values[:-1] += run_values.results[1:]
    self.mean_values[-1] += (time.time() - self.start_time) * 1000

    if cur_iter % self.config.print_invl == 0 and cur_iter != 0:
      self.mean_values /= self.config.print_invl
      logger.train(logger.iters(cur_iter, self.keys, self.mean_values))
      # reset the running means for the next window (the original discarded
      # the result of np.zeros_like, so the means were never cleared)
      self.mean_values = np.zeros_like(self.mean_values)

    if (cur_iter - 1) % self.config.val_invl == 0:
      if self.func_val is not None:
        with tf.Graph().as_default():
          self.func_val()

    if (cur_iter - 1) % self.config.test_invl == 0:
      if self.func_test is not None:
        with tf.Graph().as_default():
          self.func_test()

    if cur_iter == self.config.max_iter:
      logger.sys('Reached the maximum number of iterations; '
                 'the system will terminate.')
      exit(0)
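
This `after_run` belongs to a `tf.train.SessionRunHook`; `run_values.results` only carries the step counter and the tracked tensors because a matching `before_run` requests them via `tf.train.SessionRunArgs`. A minimal sketch of the surrounding hook and of how it plugs into a monitored session; everything except the `tf.train` API is an assumption:

    import time
    import numpy as np
    import tensorflow as tf

    class RunningHook(tf.train.SessionRunHook):
        # hypothetical skeleton around the after_run shown above
        def __init__(self, global_step, tensors, keys, config):
            self.fetches = [global_step] + tensors
            self.keys = keys
            self.config = config
            # one running mean per tensor, plus one slot for the step time (ms)
            self.mean_values = np.zeros(len(tensors) + 1, dtype=np.float64)

        def before_run(self, run_context):
            self.start_time = time.time()
            # whatever is fetched here arrives in after_run as run_values.results
            return tf.train.SessionRunArgs(self.fetches)

    # usage: the hook is passed to the monitored session and fires every step
    # with tf.train.MonitoredTrainingSession(hooks=[RunningHook(...)]) as sess:
    #     while not sess.should_stop():
    #         sess.run(train_op)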