Example #1
    def evaluate(self, iteration, lr=0):
        """Evalation for each epoch.

    Args:
      iteration: current iteration
      lr: learning rate
    Returns:
      Bool whether it is the best model at this point.
    """
        self.clean_acc_history()
        labels, preds, logits = [], [], []
        with self.strategy.scope():
            self.sess.run(self.eval_input_iterator.initialize())
            vds, vbs = self.dataset.val_dataset_size, FLAGS.val_batch_size
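            # Ceiling division: number of batches that cover the validation set.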
            total = vds // vbs + (vds % vbs != 0)
            pbar = tqdm(total=total)
            for _ in range(total):
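                # The dataset signals exhaustion by raising OutOfRangeError.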
                try:
                    test_acc, logit, label, merged_summary = self.sess.run(
                        self.eval_op)
                except tf.errors.OutOfRangeError:
                    break
                labels.append(label)
                preds.append(np.argmax(logit, 1))
                logits.append(logit)
                pbar.update(1)
                batch_top1 = float(
                    utils.topk_accuracy(
                        logit,
                        label,
                        topk=1,
                        ignore_label_above=self.dataset.num_classes))
                pbar.set_description('batch {} accuracy {:.3f} ({:.3f})'.format(
                    label.shape[0], batch_top1, test_acc))
            pbar.close()
            if FLAGS.mode != 'evaluation':
                self.eval_summary_writer.add_summary(merged_summary,
                                                     self.global_step.eval())
            # Update these variables; they are used in the next training round.
            labels = np.concatenate(labels, 0)
            preds = np.concatenate(preds, 0)
            logits = np.concatenate(logits, 0)
            offline_accuracy, num_evaluated = utils.topk_accuracy(
                logits,
                labels,
                topk=1,
                ignore_label_above=self.dataset.num_classes,
                return_counts=True)
            top5acc = utils.topk_accuracy(
                logits,
                labels,
                topk=5,
                ignore_label_above=self.dataset.num_classes)

            self.clean_acc_history()
            logging.info('[Evaluation] lr {:.5f} global_step {} total {} acc '
                         '{:.3f} (top-5 {:.3f})'.format(
                             float(lr), iteration, num_evaluated,
                             offline_accuracy, float(top5acc)))
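
Both examples rely on utils.topk_accuracy, whose implementation is not shown. A minimal NumPy sketch consistent with the call sites above (the exact semantics of ignore_label_above and return_counts are inferred from usage here, not taken from the real utils module) might look like this:

import numpy as np


def topk_accuracy(logits, labels, topk=1, ignore_label_above=None,
                  return_counts=False):
    # Hypothetical stand-in for utils.topk_accuracy, reconstructed from the
    # call sites above; the actual implementation may differ.
    logits, labels = np.asarray(logits), np.asarray(labels)
    if ignore_label_above is not None:
        # Keep only examples whose label falls inside the evaluated class
        # range (e.g. evaluating the 50-class WebVision-mini subset, per the
        # comment in Example #2).
        keep = labels < ignore_label_above
        logits, labels = logits[keep], labels[keep]
    # Indices of the k largest logits per row; ordering within the top-k set
    # is irrelevant for accuracy, so argpartition avoids a full sort.
    top_preds = np.argpartition(logits, -topk, axis=1)[:, -topk:]
    correct = (top_preds == labels[:, None]).any(axis=1)
    accuracy = float(correct.mean()) if correct.size else 0.0
    if return_counts:
        return accuracy, int(correct.size)
    return accuracy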
Example #2
    def evaluate(self, iteration, lr=0, op=None, op_scope=''):
        """Evalation for each epoch.

    Args:
      iteration: current iteration
      lr: learning rate
      op: alternative to self.eval_op for a certain dataset
      op_scope: dataset scope name of the op
    """
        self.clean_acc_history()
        labels, logits = [], []
        if op is not None:
            assert op_scope
            eval_op = op
        else:
            eval_op = self.eval_op
            op_scope = 'val'

        with self.strategy.scope():
            self.sess.run(self.eval_input_iterator.initializer)
            vds, vbs = self.dataset.val_dataset_size, FLAGS.val_batch_size
            total = vds // vbs + (vds % vbs != 0)
            for _ in range(total):
                try:
                    online_acc, logit, label, merged_summary = self.sess.run(
                        eval_op)
                except tf.errors.OutOfRangeError:
                    break
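                # Buffer per-batch outputs only when they will be reported back
                # to the training loop; otherwise discard them immediately.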
                if FLAGS.summary_eval_to_train:
                    labels.append(label)
                    logits.append(logit)
                else:
                    del logit, label

            if FLAGS.mode != 'evaluation':
                self.write_to_summary(self.eval_summary_writer,
                                      merged_summary,
                                      self.global_step.eval(),
                                      flush=True)
            if FLAGS.summary_eval_to_train:
                # Update these variables; they are used in the next training round.
                labels = np.concatenate(labels, 0)
                logits = np.concatenate(logits, 0)
                offline_accuracy, num_evaluated = utils.topk_accuracy(
                    logits,
                    labels,
                    topk=1,
                    # Useful for eval imagenet on webvision mini 50 classes.
                    ignore_label_above=self.dataset.num_classes,
                    return_counts=True)
                top5acc = utils.topk_accuracy(
                    logits,
                    labels,
                    topk=5,
                    ignore_label_above=self.dataset.num_classes)
                if op is None and FLAGS.mode != 'evaluation':
                    # We only export validation op results to self.eval_acc_on_train.
                    self.sess.run(self.eval_acc_on_train_assign_op,
                                  feed_dict={
                                      self.eval_acc_on_train_pl:
                                      np.array([
                                          float(offline_accuracy),
                                          float(top5acc), num_evaluated
                                      ])
                                  })
            else:
                num_evaluated = -1
                offline_accuracy = online_acc
                top5acc = -1
            self.clean_acc_history()
            logging.info(
                'Evaluation ({}): lr {:.5f} global_step {} total {} acc '
                '{:.3f} (top-5 {:.3f})'.format(op_scope, float(lr), iteration,
                                               num_evaluated, offline_accuracy,
                                               float(top5acc)))
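
Example #2 feeds self.eval_acc_on_train_assign_op, which must have been built at graph-construction time. The examples do not show that setup; a hypothetical TF1-style sketch that matches the feed at the call site (the names and the 3-element shape mirror the usage above and are assumptions) could be:

import tensorflow.compat.v1 as tf

# Hypothetical wiring, mirroring the names used in evaluate(): a non-trainable
# 3-vector [top1_acc, top5_acc, num_evaluated] that the training loop can read
# back during the next round.
eval_acc_on_train = tf.get_variable(
    'eval_acc_on_train',
    shape=[3],
    dtype=tf.float32,
    initializer=tf.zeros_initializer(),
    trainable=False)
eval_acc_on_train_pl = tf.placeholder(tf.float32, shape=[3])
eval_acc_on_train_assign_op = eval_acc_on_train.assign(eval_acc_on_train_pl)

Keeping the placeholder and assign op as attributes (self.eval_acc_on_train_pl, self.eval_acc_on_train_assign_op) avoids adding new ops to the graph on every evaluation call.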