Example #1
    def test(self):

        print('STARTED TESTING EVALUATION!')

        losses = []
        # accumulators; replaced on the first batch, then grown by concatenation
        preds = np.empty((0,), dtype=np.int64)
        labels = np.empty((0,), dtype=np.int64)

        for step in range(self.dataset.num_batches_test):

            eval_tensors = [
                self.Yoh, self.preds, self.loss, self.accuracy, self.precision,
                self.recall
            ]
            if (step + 1) * BATCH_SIZE % LOG_EVERY == 0:
                print("Evaluating {}, done: {}/{}".format(
                    'test', (step + 1) * BATCH_SIZE,
                    self.dataset.num_test_examples))
                eval_tensors += [self.merged_summary_op]

            # fetch the next test batch from the input pipeline
            batch_x, batch_y = self.sess.run(
                [self.dataset.test_images, self.dataset.test_labels])

            feed_dict = {
                self.is_training: False,
                self.X: batch_x,
                self.Y: batch_y
            }

            eval_ret = self.sess.run(eval_tensors, feed_dict=feed_dict)
            eval_ret = dict(zip(eval_tensors, eval_ret))

            if self.merged_summary_op in eval_tensors:
                self.summary_test_writer.add_summary(
                    eval_ret[self.merged_summary_op],
                    self.global_step.eval(session=self.sess))

            losses.append(eval_ret[self.loss])

            # accumulate ground-truth labels (decoded from one-hot Yoh) and predictions
            if preds.size == 0:
                labels = np.argmax(eval_ret[self.Yoh], axis=1)
                preds = eval_ret[self.preds]
            else:
                labels = np.concatenate(
                    (labels, np.argmax(eval_ret[self.Yoh], axis=1)), axis=0)
                preds = np.concatenate((preds, eval_ret[self.preds]), axis=0)

        total_loss = np.mean(losses)
        acc, pr, rec = util.acc_prec_rec_score(labels, np.argmax(preds,
                                                                 axis=1))
        prAtTop10, top5CorrectWords, top5IncorrectWords = util.testStatistics(
            labels, preds)
        util.write_test_results(total_loss, acc, pr, rec, prAtTop10,
                                top5CorrectWords, top5IncorrectWords,
                                self.dataset.name, self.name)
        print(
            "Test results -> {} error: epoch {} loss={} accuracy={} precision={} recall={} prAtTop10={}"
            .format('test', '1', total_loss, acc, pr, rec, prAtTop10))
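
A minimal sketch of the util.acc_prec_rec_score helper the examples call, assuming scikit-learn is available; the 'macro' averaging mode is an assumption, and the actual helper in the source repo may differ:

    from sklearn.metrics import accuracy_score, precision_score, recall_score

    def acc_prec_rec_score(labels, preds):
        """Return (accuracy, precision, recall) for integer class labels."""
        acc = accuracy_score(labels, preds)
        # 'macro' averaging is an assumption; the original may use another mode
        prec = precision_score(labels, preds, average='macro', zero_division=0)
        rec = recall_score(labels, preds, average='macro', zero_division=0)
        return acc, prec, rec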
Example #2
    def testLowMemory(self):

        losses = []
        preds = np.empty((0,), dtype=np.int64)
        labels = np.empty((0,), dtype=np.int64)

        for step in range(self.dataset.num_batches_test):

            # get data
            batch_x, batch_y = self.sess.run([self.dataset.test_images, self.dataset.test_labels])

            # run each frame through the tower network to collect per-frame outputs
            logits = []
            for sequence_image in range(self.dataset.frames):
                feed_dict = {self.towerImage: batch_x[:, sequence_image], self.is_training: False}
                eval_tensors = self.towerNet
                logits.append(self.sess.run(eval_tensors, feed_dict))
            logits = np.transpose(np.array(logits), [1, 0, 2, 3, 4])

            # combine the per-frame tower outputs into final sequence logits
            feed_dict = {self.towerLogits: logits, self.is_training: False}
            eval_tensors = self.logits
            logits = self.sess.run(eval_tensors, feed_dict)

            # get predictions
            feed_dict = {self.optLogits: logits, self.Y: batch_y, self.is_training: False}
            eval_tensors = [self.loss, self.preds]
            # note: summaries here use a hard-coded 5000-example interval,
            # unlike the LOG_EVERY interval used for progress printing below
            if (step + 1) * BATCH_SIZE % 5000 == 0:
                eval_tensors += [self.merged_summary_op]
                loss_val, predsEval, merged_ops = self.sess.run(eval_tensors, feed_dict=feed_dict)
                self.summary_test_writer.add_summary(merged_ops, self.global_step.eval(session=self.sess))
            else:
                loss_val, predsEval = self.sess.run(eval_tensors, feed_dict)

            if (step + 1) * BATCH_SIZE % LOG_EVERY == 0:
                print("Evaluating {}, done: {}/{}".format('test', (step + 1) * BATCH_SIZE, self.dataset.num_test_examples))

            losses.append(loss_val)

            if preds.size == 0:
                labels = batch_y
                preds = predsEval
            else:
                labels = np.concatenate((labels, batch_y), axis=0)
                preds = np.concatenate((preds, predsEval), axis=0)

        total_loss = np.mean(losses)
        acc, pr, rec = util.acc_prec_rec_score(labels, np.argmax(preds, axis=1))
        prAtTop10, top5CorrectWords, top5IncorrectWords = util.testStatistics(labels, preds)
        util.write_test_results(total_loss, acc, pr, rec, prAtTop10, top5CorrectWords, top5IncorrectWords,
                                self.dataset.name, self.name)
        print("Validation results -> {} error: epoch {} loss={} accuracy={} precision={} recall={} prAtTop10={}".format(
            'test', '1', total_loss, acc, pr, rec, prAtTop10))
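
The loop above grows labels and preds with np.concatenate on every batch, which copies the full arrays each time. A common alternative (a sketch, not from the source) is to collect per-batch arrays in Python lists and concatenate once at the end; the batch contents below are stand-ins:

    import numpy as np

    rng = np.random.default_rng(0)
    # hypothetical per-batch outputs: (integer labels, per-class scores)
    batches = [(rng.integers(0, 10, size=32), rng.random((32, 10)))
               for _ in range(4)]

    all_labels, all_preds = [], []
    for batch_y, batch_preds in batches:
        all_labels.append(batch_y)
        all_preds.append(batch_preds)

    labels = np.concatenate(all_labels, axis=0)  # shape (128,)
    preds = np.concatenate(all_preds, axis=0)    # shape (128, 10)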
Example #3
    def test(self):

        print('STARTED TESTING EVALUATION!')

        losses = []
        preds = np.empty((0,), dtype=np.int64)
        labels = np.empty((0,), dtype=np.int64)

        for step in range(self.dataset.num_batches_test):

            eval_tensors = [self.Yoh, self.preds, self.loss]
            if (step + 1) * BATCH_SIZE % LOG_EVERY == 0:
                print("Evaluating {}, done: {}/{}".format('test', (step + 1) * BATCH_SIZE, self.dataset.num_test_examples))
                eval_tensors += [self.merged_summary_op]

            batch_x, batch_y = self.sess.run([self.dataset.test_images, self.dataset.test_labels])

            feed_dict = {self.is_training: False, self.X: batch_x, self.Y: batch_y}

            eval_ret = self.sess.run(eval_tensors, feed_dict=feed_dict)
            eval_ret = dict(zip(eval_tensors, eval_ret))

            if self.merged_summary_op in eval_tensors:
                self.summary_test_writer.add_summary(eval_ret[self.merged_summary_op], self.global_step.eval(session=self.sess))

            losses.append(eval_ret[self.loss])

            if preds.size == 0:
                labels = np.argmax(eval_ret[self.Yoh], axis=1)
                preds = eval_ret[self.preds]
            else:
                labels = np.concatenate((labels, np.argmax(eval_ret[self.Yoh], axis=1)), axis=0)
                preds = np.concatenate((preds, eval_ret[self.preds]), axis=0)

        total_loss = np.mean(losses)
        acc, pr, rec = util.acc_prec_rec_score(labels, np.argmax(preds, axis=1))
        prAtTop10, top5CorrectWords, top5IncorrectWords = util.testStatistics(labels, preds)
        util.write_test_results(total_loss, acc, pr, rec, prAtTop10, top5CorrectWords, top5IncorrectWords, self.dataset.name, self.name)
        print("Validation results -> {} error: epoch {} loss={} accuracy={} precision={} recall={} prAtTop10={}".format('test', '1', total_loss, acc, pr, rec, prAtTop10))