def train_classifiers(train_collection, hd, file_ext, cls_type='svc'):
    """Train one classifier per hand from a collection of labelled frames.

    Parameters:
        train_collection: collection descriptor forwarded to fe.generate_dataset.
        hd: hand detector / background-model handle used for feature extraction.
        file_ext: image file extension of the training frames (e.g. 'bmp').
        cls_type: classifier type forwarded to Classifier (default 'svc').

    Returns:
        (clf_l, clf_r, label_names): the trained left-hand and right-hand
        classifiers and the list of label names for the dataset.
    """
    train_data, train_labels, label_names, files = fe.generate_dataset(
        train_collection, hd, file_ext, False)
    # BUG FIX: cls_type was previously ignored -- both classifiers were
    # hard-coded to 'svc'. Forward the parameter instead (default unchanged).
    clf_l = Classifier(cls_type)
    clf_r = Classifier(cls_type)
    # Convention throughout this file: index 0 = left hand, index 1 = right hand.
    clf_l.train(train_data[0], train_labels[0])
    clf_r.train(train_data[1], train_labels[1])
    return clf_l, clf_r, label_names
# Build the training set and the three evaluation sets. Each dataset is a
# (left-hand, right-hand) pair of feature/label arrays.
train_data, train_labels, train_label_names, _ = fe.generate_dataset(train_collection, hd, 'bmp', features, summarize)
test_data_1, test_labels_1, test_label_names_1, _ = fe.generate_dataset(test_collection_b, hd_kin, 'bin', features, summarize)  # Still Hand - Fingers Down
test_data_2, test_labels_2, test_label_names_2, _ = fe.generate_dataset(test_collection_c, hd_kin, 'bin', features, summarize)  # Scale
test_data_3, test_labels_3, test_label_names_3, _ = fe.generate_dataset(test_collection_d, hd_kin, 'bin', features, summarize)  # Practice Piece

# Every test set must share the training set's label vocabulary.
for names in (test_label_names_1, test_label_names_2, test_label_names_3):
    assert train_label_names == names, 'Train and test dataset do not have the same labels'

test_sets = ((test_data_1, test_labels_1),
             (test_data_2, test_labels_2),
             (test_data_3, test_labels_3))

# Evaluate an SVC per hand: index 0 = left hand, index 1 = right hand.
cls_svc = Classifier(cls_type='svc')
cls_svc.train(train_data[0], train_labels[0])
score_svc_left_1, score_svc_left_2, score_svc_left_3 = [cls_svc.score(d[0], l[0]) for d, l in test_sets]

cls_svc = Classifier(cls_type='svc')
cls_svc.train(train_data[1], train_labels[1])
score_svc_right_1, score_svc_right_2, score_svc_right_3 = [cls_svc.score(d[1], l[1]) for d, l in test_sets]
def error_analysis(collection, train_collection, background_model): global l_avg_accum, r_avg_accum train_data, train_labels, label_names, files = fe.generate_dataset(train_collection, background_model) clf_l = Classifier('svc') clf_r = Classifier('svc') clf_l.train(train_data[0], train_labels[0]) clf_r.train(train_data[1], train_labels[1]) l_confusion_matrix = np.zeros((len(label_names), len(label_names))) r_confusion_matrix = np.zeros((len(label_names), len(label_names))) with open(collection, 'r') as f_collection: print print print '***********************************' print 'Error Analysis:', collection print '***********************************' # print '\t%s\t\t\t\t%s\t%s\t%s' % ('File Path', 'L', 'R', 'Actual') l_errors = 0 r_errors = 0 n_samples = 0 sample_size = 10 # open each video in the collection for line in f_collection: a_line = line.replace('\n','').split('\t') # remove line breaks and split at tab folder = a_line[0] # first element is folder containing video images label = a_line[1] # next element is the ground truth label # initialize hand detector if label is '2': hd = HandDetector('data/background_model_error2', folder) ### UPDATE: This is a hack for fixing some issues with error 2: will update after ICMC paper else: hd = HandDetector(background_model, folder) right_sample = [] left_sample = [] i = 0 for left_hand, right_hand, filepath, img in hd.hand_generator(): if i<sample_size: i += 1 right_sample.append(fe.extract_features(right_hand)) # extract the features left_sample.append(fe.extract_features(left_hand)) else: l_prediction = clf_l.majority_predict(np.array(left_sample)) r_prediction = clf_r.majority_predict(np.array(right_sample)) l_correct = l_prediction == int(label) r_correct = r_prediction == int(label) # if not (l_correct and r_correct): # print '\t%s\t%s\t%s\t%s' % (filepath, l_prediction, r_prediction, label) l_confusion_matrix[int(label)][l_prediction]+=1 r_confusion_matrix[int(label)][r_prediction]+=1 if not l_correct: 
l_errors+=1 if not r_correct: r_errors+=1 # Reset sample and populate with current hands right_sample = [] left_sample = [] right_sample.append(fe.extract_features(right_hand)) left_sample.append(fe.extract_features(left_hand)) i = 1 n_samples+=1 print print 'Error Rate:', collection print 'Left:', (l_errors / float(n_samples)) print 'Right:', (r_errors / float(n_samples)) print print l_avg_accum += (l_errors / float(n_samples)) r_avg_accum += (r_errors / float(n_samples))