Example #1
def train_classifiers(train_collection, hd, file_ext, cls_type='svc'):

	# Build the per-hand dataset for the collection and train one classifier per hand
	train_data, train_labels, label_names, files = fe.generate_dataset(train_collection, hd, file_ext, False)

	clf_l = Classifier(cls_type)
	clf_r = Classifier(cls_type)

	clf_l.train(train_data[0], train_labels[0])
	clf_r.train(train_data[1], train_labels[1])

	return clf_l, clf_r, label_names
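
# A minimal usage sketch for the helper above (not from the original source): the
# collection path and file extension are placeholder assumptions, and the detector
# is built the same way as in Example #3.
hd = DetectorFactory.get_hand_detector('kinect', 'depth')
clf_l, clf_r, label_names = train_classifiers('data/train.col', hd, 'bin')
print 'Trained classifiers for labels:', label_names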
Example #2
features = 'honv'

train_data, train_labels, train_label_names, _ = fe.generate_dataset(train_collection, hd, 'bmp', features, summarize)
test_data_1, test_labels_1, test_label_names_1, _ = fe.generate_dataset(test_collection_b, hd_kin, 'bin', features, summarize)		# Still Hand - Fingers Down
test_data_2, test_labels_2, test_label_names_2, _ = fe.generate_dataset(test_collection_c, hd_kin, 'bin', features, summarize)		# Scale
test_data_3, test_labels_3, test_label_names_3, _ = fe.generate_dataset(test_collection_d, hd_kin, 'bin', features, summarize)		# Practice Piece

assert train_label_names == test_label_names_1, 'Train and test datasets do not have the same labels'
assert train_label_names == test_label_names_2, 'Train and test datasets do not have the same labels'
assert train_label_names == test_label_names_3, 'Train and test datasets do not have the same labels'



# Test with SVC
# 	left hand
cls_svc = Classifier(cls_type='svc')
cls_svc.train(train_data[0], train_labels[0])

score_svc_left_1 = cls_svc.score(test_data_1[0], test_labels_1[0])
score_svc_left_2 = cls_svc.score(test_data_2[0], test_labels_2[0])
score_svc_left_3 = cls_svc.score(test_data_3[0], test_labels_3[0])

#	right hand
cls_svc = Classifier(cls_type='svc')
cls_svc.train(train_data[1], train_labels[1])

score_svc_right_1 = cls_svc.score(test_data_1[1], test_labels_1[1])
score_svc_right_2 = cls_svc.score(test_data_2[1], test_labels_2[1])
score_svc_right_3 = cls_svc.score(test_data_3[1], test_labels_3[1])
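
# The example computes the scores but never reports them; a hedged reporting sketch,
# assuming each score() call returns a single accuracy value:
print '#### SVC Test Results ####'
print 'Left hand :', score_svc_left_1, score_svc_left_2, score_svc_left_3
print 'Right hand:', score_svc_right_1, score_svc_right_2, score_svc_right_3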

Example #3
# collection_1 and collection_2 are assumed to be defined earlier in the script (not shown in this excerpt)
collection_3 = 'data/cv_all_3.col'
collection_4 = 'data/cv_all_combined.col'

hd_rs = DetectorFactory.get_hand_detector('realsense', 'grey')
hd_kin = DetectorFactory.get_hand_detector('kinect', 'depth')

summarize = False
features = 'hog'

data_1, labels_1, train_label_names_1, _ = fe.generate_dataset(collection_1, hd_rs, 'bmp', features, summarize)
data_2, labels_2, train_label_names_2, _ = fe.generate_dataset(collection_2, hd_kin, 'bin', features, summarize)
data_3, labels_3, train_label_names_3, _ = fe.generate_dataset(collection_3, hd_kin, 'bin', features, summarize)
data_4, labels_4, train_label_names_4, _ = fe.generate_dataset(collection_4, hd_kin, 'bin', features, summarize)

cv = 5
clf = Classifier('svc')

scores_l_1 = clf.cross_validation(data_1[0], labels_1[0], cv)
scores_r_1 = clf.cross_validation(data_1[1], labels_1[1], cv)

scores_l_2 = clf.cross_validation(data_2[0], labels_2[0], cv)
scores_r_2 = clf.cross_validation(data_2[1], labels_2[1], cv)

scores_l_3 = clf.cross_validation(data_3[0], labels_3[0], cv)
scores_r_3 = clf.cross_validation(data_3[1], labels_3[1], cv)

scores_l_4 = clf.cross_validation(data_4[0], labels_4[0], cv)
scores_r_4 = clf.cross_validation(data_4[1], labels_4[1], cv)


print "#### Cross Validation Results ####"
Example #4
def error_analysis(collection, train_collection, background_model):

	global l_avg_accum, r_avg_accum

	train_data, train_labels, label_names, files = fe.generate_dataset(train_collection, background_model)

	clf_l = Classifier('svc')
	clf_r = Classifier('svc')

	clf_l.train(train_data[0], train_labels[0])
	clf_r.train(train_data[1], train_labels[1])

	l_confusion_matrix = np.zeros((len(label_names), len(label_names)))
	r_confusion_matrix = np.zeros((len(label_names), len(label_names)))

	with open(collection, 'r') as f_collection:

		print
		print
		print '***********************************'
		print 'Error Analysis:', collection
		print '***********************************'
		# print  '\t%s\t\t\t\t%s\t%s\t%s' % ('File Path', 'L', 'R', 'Actual')

		l_errors = 0
		r_errors = 0
		n_samples = 0

		sample_size = 10

		# open each video in the collection
		for line in f_collection:
			a_line = line.replace('\n','').split('\t')  # remove line breaks and split at tab
			folder = a_line[0]							# first element is folder containing video images
			label = a_line[1]							# next element is the ground truth label

			# initialize hand detector
			if label == '2':
				hd = HandDetector('data/background_model_error2', folder)   ### UPDATE: temporary hack to work around issues with error 2; will be updated after the ICMC paper
			else:
				hd = HandDetector(background_model, folder)


			right_sample = []
			left_sample = []
			i = 0

			for left_hand, right_hand, filepath, img in hd.hand_generator():

				if i<sample_size:

					i += 1

					right_sample.append(fe.extract_features(right_hand))	# extract the features
					left_sample.append(fe.extract_features(left_hand))

				else:

					l_prediction = clf_l.majority_predict(np.array(left_sample))
					r_prediction = clf_r.majority_predict(np.array(right_sample))

					l_correct = l_prediction == int(label)
					r_correct = r_prediction == int(label)

					# if not (l_correct and r_correct):
					# 	print  '\t%s\t%s\t%s\t%s' % (filepath, l_prediction, r_prediction, label)

					l_confusion_matrix[int(label)][l_prediction]+=1
					r_confusion_matrix[int(label)][r_prediction]+=1

					if not l_correct:
						l_errors+=1

					if not r_correct:
						r_errors+=1


					# Reset sample and populate with current hands
					right_sample = []
					left_sample = []

					right_sample.append(fe.extract_features(right_hand))
					left_sample.append(fe.extract_features(left_hand))

					i = 1

					n_samples+=1

		print
		print 'Error Rate:', collection
		print 'Left:', (l_errors / float(n_samples))
		print 'Right:', (r_errors / float(n_samples))
		print 
		print

		l_avg_accum += (l_errors / float(n_samples))
		r_avg_accum += (r_errors / float(n_samples))
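
# Hedged driver sketch (not part of the original source): the collection and model
# paths below are placeholders. error_analysis() accumulates per-collection error
# rates in the module-level globals, which are averaged over the collections here.
l_avg_accum = 0.0
r_avg_accum = 0.0

collections = ['data/test_b.col', 'data/test_c.col', 'data/test_d.col']	# hypothetical paths

for col in collections:
	error_analysis(col, 'data/train.col', 'data/background_model')

print 'Average error rate (left) :', l_avg_accum / len(collections)
print 'Average error rate (right):', r_avg_accum / len(collections)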