def runTestPCAKNN(self, short=0):
  LOGGER.info('\nTesting PCA/k-NN classifier')
  LOGGER.info('Mode=%s', short)

  # Test parameters: 10 classes of 100 patterns each, 90% used for training.
  numDims = 10
  numClasses = 10
  k = 10
  numPatternsPerClass = 100
  numPatterns = int(.9 * numClasses * numPatternsPerClass)
  numTests = numClasses * numPatternsPerClass - numPatterns
  numSVDSamples = int(.1 * numPatterns)
  keep = 1

  train_data, train_class, test_data, test_class = \
      pca_knn_data.generate(numDims, numClasses, k, numPatternsPerClass,
                            numPatterns, numTests, numSVDSamples, keep)

  # Two classifiers: plain k-NN, and one that reduces the input to `keep`
  # dimensions via SVD once numSVDSamples training patterns have been seen.
  pca_knn = KNNClassifier(k=k, numSVDSamples=numSVDSamples, numSVDDims=keep)
  knn = KNNClassifier(k=k)

  LOGGER.info('Training PCA k-NN')
  for i in range(numPatterns):
    knn.learn(train_data[i], train_class[i])
    pca_knn.learn(train_data[i], train_class[i])

  LOGGER.info('Testing PCA k-NN')
  numWinnerFailures = 0
  numInferenceFailures = 0
  numDistFailures = 0
  numAbsErrors = 0

  for i in range(numTests):
    winner, inference, dist, categoryDist = knn.infer(test_data[i])
    pca_winner, pca_inference, pca_dist, pca_categoryDist = \
        pca_knn.infer(test_data[i])

    # Count outright misclassifications and any disagreement between the
    # plain and the PCA-reduced classifier.
    if winner != test_class[i]:
      numAbsErrors += 1
    if pca_winner != winner:
      numWinnerFailures += 1
    if (numpy.abs(pca_inference - inference) > 1e-4).any():
      numInferenceFailures += 1
    if (numpy.abs(pca_dist - dist) > 1e-4).any():
      numDistFailures += 1

  # Convert failure counts to success percentages.
  s0 = 100 * float(numTests - numAbsErrors) / float(numTests)
  s1 = 100 * float(numTests - numWinnerFailures) / float(numTests)
  s2 = 100 * float(numTests - numInferenceFailures) / float(numTests)
  s3 = 100 * float(numTests - numDistFailures) / float(numTests)

  LOGGER.info('PCA/k-NN success rate=%s%s', s0, '%')
  LOGGER.info('Winner success=%s%s', s1, '%')
  LOGGER.info('Inference success=%s%s', s2, '%')
  LOGGER.info('Distance success=%s%s', s3, '%')

  # The PCA-reduced classifier must agree with plain k-NN on every winner.
  self.assertEqual(s1, 100.0, "PCA/k-NN test failed")
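# Illustrative sketch (not part of the original test file): the test above
# checks that k-NN over SVD-reduced vectors picks the same winners as plain
# k-NN. The helpers below use hypothetical names and only demonstrate the
# projection step KNNClassifier is assumed to perform when numSVDSamples /
# numSVDDims are set; they are not NuPIC's implementation. They assume the
# data sets are 2-D numpy arrays (patterns as rows), e.g.
#   train_p, test_p = pca_project(train_data, test_data, keep)
import numpy


def pca_project(train, test, num_dims):
  """Project train and test vectors onto the top num_dims principal
  components of the training data (a minimal PCA via SVD)."""
  mean = train.mean(axis=0)
  centered = train - mean
  # Rows of vt are the principal directions, ordered by singular value.
  _, _, vt = numpy.linalg.svd(centered, full_matrices=False)
  basis = vt[:num_dims].T
  return centered.dot(basis), (test - mean).dot(basis)


def nearest_class(query, data, labels):
  """Class label of the nearest training vector (k=1 for brevity)."""
  dists = numpy.linalg.norm(data - query, axis=1)
  return labels[int(dists.argmin())]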