Code example #1
    def setUp(self):
        self.evaluator = Evaluator(gram_size=4)
        self.test_dataframe = pd.DataFrame(data={
            "sentence": ["This is a test", ""],
            "language": ["en", "fr"]
        })
        self.sentence_values = pd.Series(["This is a test", ""]).values
Code example #2
    def test_Quizzer(self):
        file = "Manifest.json"
        fm = FileManagement()
        evaluator = Evaluator()
        fileManifest = fm.parseFileJSON(file)
        for counter, item in enumerate(fileManifest["tests"]):
            quizz = fm.parseUrlJSON(item["quizz"])
            assessment = fm.parseUrlJSON(item["assessment"])
            scores = fm.parseUrlJSON(item["scores"])
            calculatedScores = evaluator.evaluate(quizz, assessment)
            print("Exam ", counter)
            # Check each exam's scores as it is evaluated.
            self.assertTrue(evaluator.compareScores(scores, calculatedScores))
Code example #3
    def __init__(self):
        classifier = Classifier()
        classifier.Build()

        # Trainer, Evaluator
        print("Reading Training set...")
        # self.setdata('something')
        self.trainer = Trainer(classifier)

        self.trainEvaluator = Evaluator("train",
                                        dataSettings.PATH_TO_TRAIN_SET_CATELOG,
                                        classifier)
        print("\t Done.\n")

        print("Reading Validation set...")
        self.validationEvaluator = Evaluator(
            "validation", dataSettings.PATH_TO_VAL_SET_CATELOG, classifier)
        print("\t Done.\n")

        print("Reading Test set...")
        self.testEvaluator = Evaluator("test",
                                       dataSettings.PATH_TO_TEST_SET_CATELOG,
                                       classifier)
        print("\t Done.\n")

        # Summary
        summaryOp = tf.summary.merge_all()
        self.trainer.SetMergedSummaryOp(summaryOp)
        self.trainEvaluator.SetMergedSummaryOp(summaryOp)
        self.validationEvaluator.SetMergedSummaryOp(summaryOp)
        self.bestThreshold = None
        self.testEvaluator.SetMergedSummaryOp(summaryOp)

        # Time
        self._startTrainEpochTime = time.time()
        self._trainCountInOneEpoch = 0

        # Saver
        self.modelSaver = tf.train.Saver(
            max_to_keep=trainSettings.MAX_TRAINING_SAVE_MODEL)

        # Session
        self.session = tf.Session()
        init = tf.global_variables_initializer()
        self.session.run(init)

        self.trainer.SetGraph(self.session.graph)
        self.validationEvaluator.SetGraph(self.session.graph)
Code example #4
    else:
        print("\t     loss:", floatPrecision.format(loss_),
              "     frame accuracy:", floatPrecision.format(frameAccuracy_),
              "     given frame threshold:",
              threshold_, "     video accuracy:",
              floatPrecision.format(videoAccuracy_), "     duration:",
              "{0:.2f}".format(duration_) + "(s)\n")


if __name__ == '__main__':
    numberOfArguments = len(sys.argv)
    if (numberOfArguments == 2) or (numberOfArguments == 3):
        PATH_TO_DATA_SET_CATELOG = sys.argv[1]
        classifier = Classifier()
        classifier.Build()
        evaluator = Evaluator("evaluate", PATH_TO_DATA_SET_CATELOG, classifier)

        with tf.Session() as session:
            init = tf.global_variables_initializer()
            session.run(init)

            print("Load Model from: ", evalSettings.PATH_TO_MODEL_CHECKPOINTS)
            modelLoader = tf.train.Saver()
            modelLoader.restore(session,
                                evalSettings.PATH_TO_MODEL_CHECKPOINTS)

            startEvaluateTime = time.time()
            if numberOfArguments == 2:
                print("Start evaluate: ", PATH_TO_DATA_SET_CATELOG,
                      ", and find the best threshold...")
                loss, frameAccuracy, threshold, videoAccuracy = evaluator.Evaluate(
Code example #5
class Main:
    def __init__(self):
        classifier = Classifier()
        classifier.Build()

        # Trainer, Evaluator
        print("Reading Training set...")
        self.trainer = Trainer(classifier)
        self.trainEvaluator = Evaluator("train",
                                        dataSettings.PATH_TO_TRAIN_SET_CATELOG,
                                        classifier)
        print("\t Done.\n")

        print("Reading Validation set...")
        self.validationEvaluator = Evaluator(
            "validation", dataSettings.PATH_TO_VAL_SET_CATELOG, classifier)
        print("\t Done.\n")

        print("Reading Test set...")
        self.testEvaluator = Evaluator("test",
                                       dataSettings.PATH_TO_TEST_SET_CATELOG,
                                       classifier)
        print("\t Done.\n")

        # Summary
        summaryOp = tf.summary.merge_all()
        self.trainer.SetMergedSummaryOp(summaryOp)
        self.trainEvaluator.SetMergedSummaryOp(summaryOp)
        self.validationEvaluator.SetMergedSummaryOp(summaryOp)
        self.bestThreshold = None
        self.testEvaluator.SetMergedSummaryOp(summaryOp)

        # Time
        self._startTrainEpochTime = time.time()
        self._trainCountInOneEpoch = 0

        # Saver
        self.modelSaver = tf.train.Saver(
            max_to_keep=trainSettings.MAX_TRAINING_SAVE_MODEL)

        # Session
        self.session = tf.Session()
        init = tf.global_variables_initializer()
        self.session.run(init)

        self.trainer.SetGraph(self.session.graph)
        self.validationEvaluator.SetGraph(self.session.graph)

    def __del__(self):
        self.session.close()

    def Run(self):
        self.recoverFromPretrainModelIfRequired()

        self.calculateValidationBeforeTraining()
        self.resetTimeMeasureVariables()

        print("Path to save mode: ", trainSettings.PATH_TO_SAVE_MODEL)
        print("\nStart Training...\n")

        while self.trainer.currentEpoch < trainSettings.MAX_TRAINING_EPOCH:
            self.trainer.PrepareNewBatchData()
            self.trainer.Train(self.session)
            self._trainCountInOneEpoch += 1

            if self.trainer.isNewEpoch:
                print(
                    "Epoch:", self.trainer.currentEpoch,
                    "======================================" +
                    "======================================" +
                    "======================================")

                self.printTimeMeasurement()
                self.trainer.PauseDataLoading()

                self.evaluateValidationSetAndPrint(self.trainer.currentEpoch)
                self.evaluateTrainingSetAndPrint(self.trainer.currentEpoch)

                if trainSettings.PERFORM_DATA_AUGMENTATION:
                    # Preload the next TrainBatch while evaluating the TestSet
                    self.trainer.ContinueDataLoading()

                self.evaluateTestSetAndPrint(self.trainer.currentEpoch)

                self.trainer.ContinueDataLoading()

                self.resetTimeMeasureVariables()

                if self.trainer.currentEpoch >= trainSettings.EPOCHS_TO_START_SAVE_MODEL:
                    self.saveCheckpoint(self.trainer.currentEpoch)
        print("Optimization finished.")
        self.trainer.Release()
        self.trainEvaluator.Release()
        self.validationEvaluator.Release()
        self.testEvaluator.Release()

    def recoverFromPretrainModelIfRequired(self):
        if trainSettings.PRETRAIN_MODEL_PATH_NAME != "":
            print("Load Pretrain model from: " +
                  trainSettings.PRETRAIN_MODEL_PATH_NAME)
            listOfAllVariables = tf.get_collection(
                tf.GraphKeys.GLOBAL_VARIABLES)
            variablesToBeRecovered = [
                eachVariable for eachVariable in listOfAllVariables
                if eachVariable.name.split('/')[0]
                not in trainSettings.NAME_SCOPES_NOT_TO_RECOVER_FROM_CHECKPOINT
            ]
            modelLoader = tf.train.Saver(variablesToBeRecovered)
            modelLoader.restore(self.session,
                                trainSettings.PRETRAIN_MODEL_PATH_NAME)

    def evaluateTrainingSetAndPrint(self, currentEpoch_):
        '''
        Since BATCH_SIZE may be small (4 in my case), the BatchLoss or BatchAccuracy
        may fluctuate.  Calculate the loss over the whole Training set instead.
        Note: if one only wants the BatchLoss, use Trainer.EvaluateTrainLoss().
        '''
        startEvaluateTime = time.time()
        loss, frameAccuracy, threshold, videoAccuracy = self.trainEvaluator.Evaluate(
            self.session,
            currentEpoch_=currentEpoch_,
            threshold_=self.bestThreshold)
        endEvaluateTime = time.time()

        self.printCalculationResults(jobType_='train',
                                     loss_=loss,
                                     frameAccuracy_=frameAccuracy,
                                     isThresholdOptimized_=False,
                                     threshold_=threshold,
                                     videoAccuracy_=videoAccuracy,
                                     duration_=(endEvaluateTime -
                                                startEvaluateTime))

    def calculateValidationBeforeTraining(self):
        if trainSettings.PRETRAIN_MODEL_PATH_NAME != "":
            print(
                "Validation before Training ",
                "=============================" +
                "======================================" +
                "======================================")
            self.evaluateValidationSetAndPrint(currentEpoch_=0)

    def evaluateValidationSetAndPrint(self, currentEpoch_):
        startEvaluateTime = time.time()
        loss, frameAccuracy, threshold, videoAccuracy = self.validationEvaluator.Evaluate(
            self.session, currentEpoch_=currentEpoch_, threshold_=None)
        endEvaluateTime = time.time()

        self.bestThreshold = threshold
        self.printCalculationResults(jobType_='validation',
                                     loss_=loss,
                                     frameAccuracy_=frameAccuracy,
                                     isThresholdOptimized_=True,
                                     threshold_=threshold,
                                     videoAccuracy_=videoAccuracy,
                                     duration_=(endEvaluateTime -
                                                startEvaluateTime))

    def evaluateTestSetAndPrint(self, currentEpoch_):
        startEvaluateTime = time.time()
        loss, frameAccuracy, threshold, videoAccuracy = self.testEvaluator.Evaluate(
            self.session,
            currentEpoch_=currentEpoch_,
            threshold_=self.bestThreshold)
        endEvaluateTime = time.time()

        self.printCalculationResults(jobType_='test',
                                     loss_=loss,
                                     frameAccuracy_=frameAccuracy,
                                     isThresholdOptimized_=False,
                                     threshold_=threshold,
                                     videoAccuracy_=videoAccuracy,
                                     duration_=(endEvaluateTime -
                                                startEvaluateTime))

    def printTimeMeasurement(self):
        timeForTrainOneEpoch = time.time() - self._startTrainEpochTime
        print("\t Back Propergation time measurement:")
        print("\t\t duration: ", "{0:.2f}".format(timeForTrainOneEpoch),
              "s/epoch")
        averagedTrainTime = timeForTrainOneEpoch / self._trainCountInOneEpoch
        print("\t\t average: ", "{0:.2f}".format(averagedTrainTime), "s/batch")
        print()

    def resetTimeMeasureVariables(self):
        self._startTrainEpochTime = time.time()
        self._trainCountInOneEpoch = 0

    def printCalculationResults(self, jobType_, loss_, frameAccuracy_,
                                isThresholdOptimized_, threshold_,
                                videoAccuracy_, duration_):
        floatPrecision = "{0:.4f}"
        print("\t " + jobType_ + ":")
        if isThresholdOptimized_:
            print("\t     loss:",
                  floatPrecision.format(loss_), "     frame accuracy:",
                  floatPrecision.format(frameAccuracy_),
                  "     best frame threshold:",
                  threshold_, "     video accuracy:",
                  floatPrecision.format(videoAccuracy_), "     duration:",
                  "{0:.2f}".format(duration_) + "(s)\n")
        else:
            print("\t     loss:",
                  floatPrecision.format(loss_), "     frame accuracy:",
                  floatPrecision.format(frameAccuracy_),
                  "     given frame threshold:",
                  threshold_, "     video accuracy:",
                  floatPrecision.format(videoAccuracy_), "     duration:",
                  "{0:.2f}".format(duration_) + "(s)\n")

    def saveCheckpoint(self, currentEpoch_):
        pathToSaveCheckpoint = os.path.join(trainSettings.PATH_TO_SAVE_MODEL,
                                            "save_epoch_" + str(currentEpoch_))
        checkpointPathFileName = os.path.join(pathToSaveCheckpoint,
                                              "ViolenceNet.ckpt")
        self.modelSaver.save(self.session, checkpointPathFileName)
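
A minimal launch sketch for the Main class above; this entry point is an assumption and is not part of the original listing:

if __name__ == '__main__':
    main = Main()   # builds the classifier, evaluators, and session in __init__
    main.Run()      # runs the training loop and the periodic evaluations
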
Code example #6
File: deucestest.py  Project: Slug-Man/ComboTrainer
from copy import deepcopy

from src import Card
from src import Deck
from src import Evaluator
from src import hand_range_to_cards

evaluator = Evaluator()
original_deck = Deck()

hands = hand_range_to_cards('random')

i = 0
for hand in hands:
    i += 1
    deck = deepcopy(original_deck)
    deck.draw_card(hand[0])
    deck.draw_card(hand[1])

    board = [deck.draw_rank('A'), deck.draw_rank('6'), deck.draw_rank('2')]

    print(evaluator.evaluate(hand, board), end=' ')
    Card.print_pretty_cards(board + hand)

print(i)
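
If the src.Evaluator here wraps the deuces hand evaluator (the file name deucestest.py suggests it does), the raw score returned by evaluate() can be mapped to a readable hand class. A sketch under that assumption:

score = evaluator.evaluate(hand, board)
rank_class = evaluator.get_rank_class(score)      # deuces-style API, assumed available on this wrapper
print(evaluator.class_to_string(rank_class))      # e.g. "Pair" or "Flush"
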
Code example #7
# Imports inferred from the patch targets and fixtures used below.
import unittest
from unittest.mock import Mock, patch

import pandas as pd
from numpy import ndarray

from src import Evaluator


class TestEvaluator(unittest.TestCase):
    def setUp(self):
        self.evaluator = Evaluator(gram_size=4)
        self.test_dataframe = pd.DataFrame(data={
            "sentence": ["This is a test", ""],
            "language": ["en", "fr"]
        })
        self.sentence_values = pd.Series(["This is a test", ""]).values

    def test_correctly_initialised_with_gram_size(self):
        self.assertEqual(self.evaluator.grams, 4)

    def test_evaluate_model_with_invalid_for_languages_option(self):
        with self.assertRaises(ValueError) as context:
            # evaluate_model raises before returning, so there is no result to assert on.
            self.evaluator.evaluate_model("path/to/model",
                                          "path/to/test_data",
                                          for_languages="invalid")
        self.assertEqual(
            "'for_languages' option must be either 'all' or 'each'",
            str(context.exception))

    @patch("src.Evaluator._get_model_accuracy")
    @patch("src.evaluator.pd.read_csv")
    @patch("src.evaluator.ModelIO.load_model_from_path")
    def test_evaluate_model(self, mock_load_model, mock_read_csv,
                            mock_get_accuracy):
        # Mock objects, classes and methods
        mocked_model, mocked_test_data = Mock(), Mock()
        mock_load_model.return_value = mocked_model
        mock_read_csv.return_value = mocked_test_data
        mock_get_accuracy.return_value = {"en": 95.0, "fr": 97.0}
        # Run the 'evaluate_model' method
        evaluation = self.evaluator.evaluate_model("path/to/model",
                                                   "path/to/test_data",
                                                   for_languages="all")
        # Make assertions
        mock_load_model.assert_called_with("path/to/model")
        mock_read_csv.assert_called_with("path/to/test_data")
        mock_get_accuracy.assert_called_with(mocked_model,
                                             mocked_test_data,
                                             for_languages="all")
        self.assertIsInstance(evaluation, dict)
        self.assertEqual(evaluation["en"], 95.0)
        self.assertEqual(evaluation["fr"], 97.0)

    @patch("src.Evaluator._get_test_sentences_and_labels")
    def test_get_model_accuracy_for_all_languages(self, mock_sents_and_labels):
        # Mock objects, classes and methods
        mock_sents_and_labels.return_value = [[1, 2, 3, 4]], ["en"]
        mocked_model = Mock()
        mocked_model.score.return_value = 95.0
        # Run the '_get_model_accuracy' method
        accuracy = self.evaluator._get_model_accuracy(mocked_model,
                                                      self.test_dataframe,
                                                      for_languages="all")
        # Make assertions
        mock_sents_and_labels.assert_called_with(self.test_dataframe)
        mocked_model.score.assert_called_with([[1, 2, 3, 4]], ["en"])
        self.assertIsInstance(accuracy, float)
        self.assertEqual(accuracy, 95.0)

    @patch("src.Evaluator._get_test_sentences_and_labels")
    @patch("src.evaluator.pd.Series.unique")
    def test_get_model_accuracy_with_each_language(self, mock_unique,
                                                   mock_sents_and_labels):
        # Mock objects, classes and methods
        mock_unique.return_value = ["en", "fr"]
        mock_sents_and_labels.side_effect = [([[1, 2, 3, 4]], ["en"]),
                                             ([[5]], ["fr"])]
        mocked_model = Mock()
        mocked_model.score.side_effect = [95.0, 97.0]
        # Run the '_get_model_accuracy' method
        accuracies = self.evaluator._get_model_accuracy(mocked_model,
                                                        self.test_dataframe,
                                                        for_languages="each")
        # Make assertions
        mock_unique.assert_called()
        self.assertEqual(mock_sents_and_labels.call_count, 2)
        self.assertEqual(mocked_model.score.call_count, 2)
        self.assertIsInstance(accuracies, dict)
        self.assertEqual(accuracies["en"], 95.0)
        self.assertEqual(accuracies["fr"], 97.0)

    @patch("src.evaluator.Vectoriser")
    @patch("src.evaluator.pd.Series.fillna")
    def test_get_test_sentences_and_labels(self, mock_fillna, mock_vectoriser):
        # Mock classes and methods
        mock_fillna.return_value = ["This is a test", ""]
        mocked_vectoriser = mock_vectoriser.return_value
        mocked_vectoriser.transform.return_value = [[1, 2, 3, 4], []]
        # Run the '_get_test_sentences_and_labels' method
        X, y = self.evaluator._get_test_sentences_and_labels(
            self.test_dataframe)
        # Make assertions
        mock_fillna.assert_called_with("")
        mock_vectoriser.assert_called_with(gram_size=4)
        mocked_vectoriser.transform.assert_called()
        self.assertIsInstance(X, list)
        self.assertEqual(X, [[1, 2, 3, 4], []])
        self.assertIsInstance(y, ndarray)
        self.assertEqual(list(y), ["en", "fr"])

    def tearDown(self):
        del self.evaluator
        del self.test_dataframe
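
A minimal entry point for running this test class directly; it is not part of the original snippet:

if __name__ == "__main__":
    unittest.main()
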
Code example #8
def evaluate_model(model_path, test_path, for_languages="all", gram_size=2):
    evaluator = Evaluator(gram_size=gram_size)
    accuracy = evaluator.evaluate_model(model_path,
                                        test_path,
                                        for_languages=for_languages)
    print(accuracy)
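
A small command-line wrapper sketch for the helper above; the argument wiring is an assumption, not part of the original snippet:

import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Evaluate a saved model on a test set.")
    parser.add_argument("model_path")
    parser.add_argument("test_path")
    parser.add_argument("--for-languages", default="all", choices=["all", "each"])
    parser.add_argument("--gram-size", type=int, default=2)
    args = parser.parse_args()

    evaluate_model(args.model_path, args.test_path,
                   for_languages=args.for_languages,
                   gram_size=args.gram_size)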