def test_log_likelihood(self):
    # check that log likelihood is maximal at true parameters
    nclasses, nitems = 3, 1500 * 8

    # create random model and data (this is our ground truth model)
    true_model = ModelBtLoopDesign.create_initial_state(nclasses)
    annotations = true_model.generate_annotations(nitems)

    max_llhood = true_model.log_likelihood(annotations)

    # perturb gamma
    for _ in range(20):
        theta = true_model.theta
        gamma = np.random.normal(loc=true_model.gamma, scale=0.1)
        gamma = np.clip(gamma, 0., 1.)
        gamma /= gamma.sum()
        model = ModelBtLoopDesign(nclasses, gamma, theta)
        llhood = model.log_likelihood(annotations)
        self.assertGreater(max_llhood, llhood)

    # perturb theta
    for _ in range(20):
        gamma = true_model.gamma
        theta = np.random.normal(loc=true_model.theta, scale=0.1)
        theta = np.clip(theta, 0., 1.)
        model = ModelBtLoopDesign(nclasses, gamma, theta)
        llhood = model.log_likelihood(annotations)
        self.assertGreater(max_llhood, llhood)
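# A minimal, self-contained illustration (not part of the original suite) of
# the perturb-and-compare pattern used in test_log_likelihood, shown on a
# plain categorical model where the likelihood is easy to write down: with a
# large sample, the log likelihood at the generating probabilities beats the
# log likelihood at nearby perturbed probabilities.  The function name, seed,
# and sample sizes are illustrative choices; it relies only on numpy, assumed
# imported as np at module level.
def _categorical_loglike_sketch(nclasses=3, nitems=12000, seed=0):
    rng = np.random.RandomState(seed)
    p_true = rng.dirichlet(np.ones(nclasses))
    data = rng.choice(nclasses, size=nitems, p=p_true)
    counts = np.bincount(data, minlength=nclasses)

    def loglike(p):
        # log P(data | p) for i.i.d. categorical draws
        return np.sum(counts * np.log(p))

    max_llhood = loglike(p_true)
    for _ in range(20):
        # perturb, clip to a valid range, and renormalize, as in the test
        p = np.clip(rng.normal(loc=p_true, scale=0.1), 1e-6, 1.)
        p /= p.sum()
        # holds with overwhelming probability for this sample size
        assert max_llhood > loglike(p)
    return max_llhood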
def test_inference(self):
    # perfect annotation, check that inferred label is correct
    nclasses, nitems = 3, 50 * 8

    # create random model (this is our ground truth model)
    gamma = np.ones((nclasses,)) / float(nclasses)
    theta = np.ones((8,)) * 0.999
    true_model = ModelBtLoopDesign(nclasses, gamma, theta)

    # create random data
    labels = true_model.generate_labels(nitems)
    annotations = true_model.generate_annotations_from_labels(labels)

    posterior = true_model.infer_labels(annotations)
    testing.assert_allclose(posterior.sum(1), 1., atol=1e-6, rtol=0.)

    inferred = posterior.argmax(1)
    testing.assert_equal(inferred, labels)
    self.assertTrue(np.all(posterior[np.arange(nitems), inferred] > 0.999))

    # at chance annotation, disagreeing annotators: get back prior
    gamma = ModelBtLoopDesign._random_gamma(nclasses)
    theta = np.ones((8,)) / float(nclasses)
    model = ModelBtLoopDesign(nclasses, gamma, theta)

    data = np.array([[MV, 0, 1, 2, MV, MV, MV, MV]])
    testing.assert_almost_equal(np.squeeze(model.infer_labels(data)),
                                model.gamma, 6)
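# A minimal sketch (not part of the original suite) of why the at-chance check
# in test_inference recovers the prior.  It assumes the single-parameter
# accuracy structure behind the B-with-theta model: annotator j reports the
# true label with probability theta[j] and spreads the remaining mass
# uniformly over the other classes.  With theta[j] = 1/nclasses every report
# is equally likely under every true label, so the likelihood cancels and the
# posterior equals gamma.  The function name is illustrative; it relies only
# on numpy, assumed imported as np at module level.
def _chance_posterior_sketch(nclasses=3, nannotators=8):
    gamma = np.random.dirichlet(np.ones(nclasses))   # arbitrary prior
    theta = np.ones(nannotators) / nclasses          # at-chance accuracies
    observed = [0, 1, 2]                             # three disagreeing reports

    def p_obs(theta_j, obs, true):
        # P(annotator reports obs | true label) under the assumed model
        return theta_j if obs == true else (1. - theta_j) / (nclasses - 1)

    posterior = gamma.copy()
    for true in range(nclasses):
        for j, obs in enumerate(observed):
            posterior[true] *= p_obs(theta[j], obs, true)
    posterior /= posterior.sum()
    return gamma, posterior    # the two arrays coincide up to rounding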