def main():
    """Driver: choose which k-NN / k-d-tree demos to execute via the flags below."""
    randomCloud = False
    example = False
    iris = False
    irisCv = False
    leaf = True
    timing = False  # if true then functions are timed

    if randomCloud:
        # we generate a random dataset with following properties: num points in dims
        # dimensions, with coordinate values contained between min and max.
        # we then build a k-d tree of this dataset and print it
        print("\n\n" + 100 * "=" + "\nrandomCloud\n\n")
        num = 10000
        dims = 3
        min_ = -1000
        max_ = 1000
        cloud = gen_cloud(num, dims, min_, max_)
        if timing:
            randomTree = timed_create_tree(cloud, dims)
        else:
            randomTree = create_tree(cloud, dims)
        # only print small trees — 10000 nodes would flood the console
        if num <= 100:
            print(randomTree)
        # calculate nearest neighbours of randomly generated point
        point = gen_cloud(1, dims, min_, max_)[0]
        candidates = []
        if timing:
            timed_nearest_neighbours(point=point, node=randomTree, candidateList=candidates, k=3)
        else:
            nearest_neighbours(point=point, node=randomTree, candidateList=candidates, k=3)
        print_neighbours(candidates)

    if example:
        # we use the data from
        # gopalcdas.com/2017/05/24/construction-of-k-d-tree-and-using-it-for-nearest-neighbour-search/
        # to create the trees and search for k nearest neighbours for the point to classify
        print("\n\n" + 100 * "=" + "\nexample\n\n")
        dims = 2
        cloud, labels = load_dataset_example()
        labelDic = to_dict(cloud, labels)
        if timing:
            tree = timed_create_tree(cloud, dims)
        else:
            tree = create_tree(cloud, dims)
        print(tree)
        # for just one point
        point = [4, 8]
        candidates = []
        if timing:
            timed_nearest_neighbours(point=point, node=tree, candidateList=candidates, k=3)
        else:
            nearest_neighbours(point=point, node=tree, candidateList=candidates, k=3)
        print("nearest neighbours of", point, ":")
        print_neighbours(candidates)
        # for multiple points
        cloud2 = [[3, 6], [3, 7], [1, 9]]
        if timing:
            predictions = timed_batch_knn(cloud, cloud2, labelDic, 2)
        else:
            predictions = batch_knn(cloud, cloud2, labelDic, 2)
        print("naive", naive_knn(cloud, cloud2, labelDic, 2))
        print(predictions)

    if iris:
        # we test the performance of our method using data from the iris dataset
        # and plot the results
        print("\n\n" + 100 * "=" + "\nIRIS\n\n")
        (
            pointsTrain,
            targetTrain,
            pointsTest,
            targetTest,
            toPlotTrain,
            toPlotTest,
        ) = load_dataset_iris(twoClasses=False)
        pointsDictTrain = to_dict(pointsTrain, targetTrain)
        pointsDictTest = to_dict(pointsTest, targetTest)
        dicIris = {**pointsDictTrain, **pointsDictTest}
        predictions1 = timed_batch_knn(pointsTrain, pointsTest, pointsDictTrain, 2)
        predictions2 = timed_naive_knn(pointsTrain, pointsTest, pointsDictTrain, 2)
        print_preds(predictions1, pointsDictTest)
        print("naive")
        print_preds(predictions2, pointsDictTest)
        # BUGFIX: original passed the undefined name `predictions` here (NameError
        # when iris=True); plot the k-d-tree predictions, `predictions1`.
        plot_points(toPlotTrain, targetTrain, toPlotTest, predictions1)

        # irisCv depends on pointsTrain / dicIris from the iris branch above,
        # so it is nested here — it cannot run standalone.
        if irisCv:
            kList = [1, 2, 5, 10, 20]
            cvResultTest, cvResultTrain = cv(pointsTrain, 0.1, 2, kList, dicIris, 10)
            print(cvResultTest, cvResultTrain)
            cv_plotter(kList, cvResultTest, cvResultTrain)

    if leaf:
        # we test the performance of our algorithm using the leaf dataset and k-fold
        # cross validation, which is in the train.csv file
        # we plot the results of the CV. The leaf dataset has a high dimensionality
        print("\n\n" + 100 * "=" + "\nleaf\n\n")
        x, y = load_dataset_leaf()
        dic = to_dict(x, y)
        # hold out the last 20 points as a quick test split
        predictions1 = timed_batch_knn(x[:-20], x[-20:], dic, 1)
        print_preds(predictions1, dic)
        kList = [1, 2, 5, 10, 20]
        cvResultTest, cvResultTrain = timed_cv(x, 0.1, 10, kList, dic, 2)
        cv_plotter(kList, cvResultTest, cvResultTrain)
def get_landing():
    """Serialize every Landing record and return it as a JSON response."""
    rows = Landing.query.all()
    return jsonify(to_dict(rows))
def get_instruments():
    """Serialize every Instrument record and return it as a JSON response."""
    rows = Instrument.query.all()
    return jsonify(to_dict(rows))
def get_sections():
    """Serialize every Section record and return it as a JSON response."""
    rows = Section.query.all()
    return jsonify(to_dict(rows))
def get_links():
    """Return the link rows serialized as a JSON response.

    NOTE(review): this queries ``Section.query.all()`` even though the
    function is named get_links — looks like a copy-paste from
    get_sections. Confirm whether a Link model was intended; the query is
    left unchanged here because no Link model is visible in this file.
    """
    links = Section.query.all()
    output = to_dict(links)
    # redundant parentheses around the return value removed for
    # consistency with the sibling endpoints
    return jsonify(output)
async def get_message_history(request):
    """Fetch all rows of the message table and return them as a JSON array."""
    pool = request.app['pgpool']
    async with pool.acquire() as connection:
        rows = await connection.fetch(
            'SELECT create_time, value, client_id FROM message')
        serialized = [to_dict(row) for row in rows]
        return web.json_response(data=serialized)