Example no. 1
0
	def testSupervisedTraining(self):
		"""Check supervised parameter fitting on a tiny parity-tagging task.

		Trains an HMM on four labelled sequences whose hidden states tag
		each observation as 'odd' or 'even', then decodes an unlabelled
		mystery sequence and compares the predicted tags to the expected
		ones.
		"""
		# (label sequence, observation sequence) pairs for training.
		training_pairs = [
			(['odd', 'even', 'odd', 'even', 'odd'], [3, 2, 1, 4, 1]),
			(['even', 'even', 'odd', 'odd', 'even'], [2, 4, 1, 3, 2]),
			(['even', 'even', 'odd', 'odd', 'odd'], [1, 2, 3, 4, 3]),
			(['odd', 'odd', 'even', 'even', 'even'], [4, 3, 4, 1, 2]),
		]
		instances = [Instance(label = labels, data = observations)
					 for labels, observations in training_pairs]
		model = HMM()
		model.train(instances)
		# Decode an unlabelled sequence and check the tagging.
		unlabeled = Instance(data = [2, 1, 3, 4, 2, 2, 1, 3])
		predicted = model.classify_instance(unlabeled)
		expected = ['even', 'odd', 'odd', 'even', 'even', 'even', 'odd', 'odd']
		self.assertEqual(predicted, expected, 'NOOO')
	# NOTE(review): these lines are dedented out of the test method above to
	# class-body level, and the names 'references', 'hypotheses', and 'hmm'
	# are not defined in this scope -- executing this class definition would
	# raise a NameError here. This looks like a stray fragment of the
	# evaluation script pasted further below; confirm and remove or re-home
	# it inside the appropriate function.
	print compute_PER(references, hypotheses)
	
	hmm.print_parameters()
	"""
	
	#unsupervised training
	
	#for mode in ['uniform', 'random', 'sneaky']:
	for mode in ['sneaky']:
		hmm = HMM()
		hmm._be_prepared_for_baum_welch(training_set, 'sneaky')
		hmm.baum_welch_train(training_set) 
	
		#Experiment x.2
		hypotheses2 = [hmm.classify_instance(x) for x in training_set]
		references2 = [x.old_label for x in training_set]
	
		print "Over Training Set:"
		print compute_PER(references2, hypotheses2)
		
		#Experiment x.3
		hypotheses = [hmm.classify_instance(x) for x in test_set]
		references = [x.label for x in test_set]
	
		print "Over Test Set:"
		print compute_PER(references, hypotheses)
	
		hmm.print_parameters()