def test_k_nearest_neighbor():
    datasets = [
        os.path.join('data', x)
        for x in os.listdir('data')
        if os.path.splitext(x)[-1] == '.json'
    ]
    aggregators = ['mean', 'mode', 'median']
    distances = ['euclidean', 'manhattan', 'cosine']

    for data_path in datasets:
        # Load data and make sure its shape is correct
        features, targets = load_json_data(data_path)
        targets = targets[:, None]  # expand dims

        for d in distances:
            for a in aggregators:
                # make model and fit
                knn = KNearestNeighbor(1, distance_measure=d, aggregator=a)
                knn.fit(features, targets)

                # predict and calculate accuracy
                labels = knn.predict(features)
                acc = accuracy(targets, labels)

                # error if there's an issue
                msg = 'Failure with dataset: {}. Settings: dist={}, agg={}.'.format(
                    data_path, d, a)
                assert acc == 1.0, msg
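
# For reference, `accuracy` is assumed here to return the fraction of predictions
# that match the targets, so acc == 1.0 means the 1-NN model reproduces every
# training label exactly. A minimal sketch of that assumption; `_accuracy_sketch`
# is a hypothetical name, not the accuracy helper imported for the tests above.
def _accuracy_sketch(targets, predictions):
    # flatten both arrays so the (n, 1) targets compare elementwise with the predictions
    targets = np.asarray(targets).reshape(-1)
    predictions = np.asarray(predictions).reshape(-1)
    return np.mean(targets == predictions)
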
def test_perceptron():
    features, targets = load_json_data('data/parallel_lines.json')

    p = Perceptron(max_iterations=100)
    p.fit(features, targets)
    targets_hat = p.predict(features)

    # your perceptron should fit this dataset perfectly
    assert np.allclose(targets, targets_hat)
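
# A minimal sketch of the interface this test assumes: fit() runs the classic
# perceptron update rule (w += y * x on each misclassified example, with a bias
# column appended) for at most `max_iterations` passes over the data, and
# predict() returns the sign of the learned linear function. `_PerceptronSketch`
# is an illustrative stand-in, not the Perceptron imported above, and it assumes
# +1 / -1 labels.
class _PerceptronSketch:
    def __init__(self, max_iterations=200):
        self.max_iterations = max_iterations
        self.weights = None

    def fit(self, features, targets):
        # append a bias feature of 1s so the boundary need not pass through the origin
        X = np.hstack([features, np.ones((features.shape[0], 1))])
        self.weights = np.zeros(X.shape[1])
        for _ in range(self.max_iterations):
            errors = 0
            for x, y in zip(X, targets):
                if y * np.dot(self.weights, x) <= 0:  # misclassified (or on the boundary)
                    self.weights += y * x
                    errors += 1
            if errors == 0:  # converged: every example classified correctly
                break

    def predict(self, features):
        X = np.hstack([features, np.ones((features.shape[0], 1))])
        return np.sign(X @ self.weights)
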
def test_transform_data():
    features, targets = load_json_data('data/transform_me.json')
    features_transform = transform_data(features)

    p = Perceptron(max_iterations=100)
    p.fit(features_transform, targets)
    targets_hat = p.predict(features_transform)

    # your perceptron should fit this dataset perfectly after transforming the data
    assert np.allclose(targets, targets_hat)
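
# `transform_data` can be any feature map that makes this dataset linearly
# separable; the test only requires that the perceptron then fits it perfectly.
# As one hypothetical illustration (assuming, say, the two classes form rings
# around the origin), mapping each point to its radius would suffice, since a
# single threshold on the radius separates the rings. `_transform_sketch` is
# illustrative only, not the transform_data imported above.
def _transform_sketch(features):
    # distance of each point from the origin, kept 2D so a linear model can consume it
    return np.linalg.norm(features, axis=1, keepdims=True)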