def doit(predictions, true_labels, expected):
    """Run a single test case against the analysis module.

    Parameters
    ==========

    predictions : list
        A list of integer predictions to input

    true_labels : list
        Ground truth values to compare to

    expected : float
        The expected classification-error rate

    Raises
    ======

    AssertionError
        In case something goes wrong
    """
    # convert plain lists to arrays before handing them to the analysis code
    cer = analysis.CER(numpy.array(predictions), numpy.array(true_labels))
    assert numpy.isclose(cer, expected), "Expected %r, but got %r" % (
        expected,
        cer,
    )
def test_one(protocol, variables):
    """Train on one protocol and return the classification-error rate on test."""
    # load training data and learn the normalization from it
    train_set = database.get(protocol, 'train', database.CLASSES, variables)
    normalizer = preprocessor.estimate_norm(numpy.vstack(train_set))
    normed_train = preprocessor.normalize(train_set, normalizer)

    # fit the multi-class model on the normalized training data
    machine = algorithm.MultiClassTrainer().train(normed_train)

    # score the held-out test data using the *training* normalization
    test_set = database.get(protocol, 'test', database.CLASSES, variables)
    normed_test = preprocessor.normalize(test_set, normalizer)
    predictions = machine.predict(numpy.vstack(normed_test))
    labels = algorithm.make_labels(test_set).astype(int)
    return analysis.CER(predictions, labels)
def test_one(protocol, variables):
    """Runs one single test, returns the CER on the test set"""
    # fetch the training split from the preset database API
    training = database.get(protocol, "train", database.CLASSES, variables)

    # estimate normalization on training data only, then apply it
    normalization = preprocessor.estimate_norm(numpy.vstack(training))
    training_normed = preprocessor.normalize(training, normalization)

    # fit the logistic-regression machine
    machine = algorithm.MultiClassTrainer().train(training_normed)

    # predict on the unseen test split, re-using the training normalization
    testing = database.get(protocol, "test", database.CLASSES, variables)
    testing_normed = preprocessor.normalize(testing, normalization)
    predicted = machine.predict(numpy.vstack(testing_normed))
    truth = algorithm.make_labels(testing).astype(int)

    return analysis.CER(predicted, truth)
def doit(predictions, true_labels, expected):
    '''Runs a single test case

    Parameters:

      predictions (list): A list of integer predictions to input

      true_labels (list): Ground truth values to compare to

      expected (float): The expected classification-error rate

    Raises:

      AssertionError: in case something goes wrong
    '''
    # the analysis code expects numpy arrays, not plain lists
    as_array_pred = numpy.array(predictions)
    as_array_true = numpy.array(true_labels)
    cer = analysis.CER(as_array_pred, as_array_true)
    # float comparison must be tolerant, hence isclose instead of ==
    assert numpy.isclose(cer, expected), 'Expected %r, but got %r' % (expected, cer)
def doit(predictions, true_labels, expected):
    """Assert that the CER of *predictions* vs *true_labels* equals *expected*."""
    # coerce inputs to numpy arrays for the analysis module
    cer = analysis.CER(numpy.array(predictions), numpy.array(true_labels))
    # compare floats with a tolerance rather than strict equality
    assert numpy.isclose(cer, expected), 'Expected %r, but got %r' % (expected, cer)