and the second entry is the average of the bottom half of the image = rows floor(m/2) [inclusive] to m """ n_samples = len(x) m = len(x[0]) n = len(x[0, 0]) cut = int(np.floor(m / 2)) result = [] for i in range(n_samples): top_half = x[i, :cut, :] bottom_half = x[i, cut:, :] tb = np.array([np.mean(top_half), np.mean(bottom_half)]) result.append(tb) result = np.array(result) return result.T # use this function to evaluate accuracy acc_row = hw3.get_classification_accuracy(row_average_features(data), labels) acc_col = hw3.get_classification_accuracy(col_average_features(data), labels) acc_tb = hw3.get_classification_accuracy(top_bottom_features(data), labels) print("row feature accuracy is {}, column accuracy is {}, \ top/bottom accuracy is {} ".format(acc_row, acc_col, acc_tb)) #------------------------------------------------------------------------------- # Analyze MNIST data #------------------------------------------------------------------------------- # Your code here to process the MNIST data
raise Exception("modify me!")


def col_average_features(x):
    """
    @param x (n_samples,m,n) array with values in (0,1)
    @return (n,n_samples) array where each entry is the average of a column
    """
    # Averaging over axis=1 (the rows) collapses each image to its n column
    # means -> shape (n_samples, n); transpose to the documented (n, n_samples).
    return np.mean(x, axis=1).T


def top_bottom_features(x):
    """
    @param x (n_samples,m,n) array with values in (0,1)
    @return (2,n_samples) array where the first entry of each column is the
    average of the top half of the image = rows 0 to floor(m/2) [exclusive]
    and the second entry is the average of the bottom half of the image
    = rows floor(m/2) [inclusive] to m
    """
    # Row index splitting top half [0, cut) from bottom half [cut, m).
    cut = x.shape[1] // 2
    # Mean over each sample's (row, col) axes, vectorized across samples.
    top = np.mean(x[:, :cut, :], axis=(1, 2))
    bottom = np.mean(x[:, cut:, :], axis=(1, 2))
    # Row 0 = top-half means, row 1 = bottom-half means -> (2, n_samples).
    return np.vstack((top, bottom))


# use this function to evaluate accuracy
acc = hw3.get_classification_accuracy(raw_mnist_features(data), labels)
print(acc)

#-------------------------------------------------------------------------------
# Analyze MNIST data
#-------------------------------------------------------------------------------

# Your code here to process the MNIST data
#add the caculated features to the data set row_feature_set = np.concatenate((row_feature_set, row_feat), axis=1) col_feature_set = np.concatenate((col_feature_set, col_feat), axis=1) top_bot_feature_set = np.concatenate( (top_bot_feature_set, top_bot_feat), axis=1) return row_feature_set, col_feature_set, top_bot_feature_set flat = raw_mnist_features(data) new_features = extracted_feature_set(data) print(new_features.shape) print(labels.shape) # use this function to evaluate accuracy # acc = hw3.get_classification_accuracy(raw_mnist_features(data), labels) acc = hw3.get_classification_accuracy(extracted_feature_set(data), labels) print('combined feature accuracy:', acc) #part 6.2 evaluations #generate the feature sets row_feature_set, col_feature_set, top_bot_feature_set = iterate_individual_feature( data) #evaluate the accuracy of each feature set by calling get_classification_accuracy for feat in (row_feature_set, col_feature_set, top_bot_feature_set): acc = hw3.get_classification_accuracy(feat, labels) print(f'accuracy for feature set is : {acc}') #------------------------------------------------------------------------------- # Analyze MNIST data #------------------------------------------------------------------------------- # Your code here to process the MNIST data