def main():
    """Fit an RVR to noisy samples of the sinc function and plot the result.

    Trains on ~80% of N generated samples, prints the relevance vectors and
    the re-estimated noise level, and shows the predicted mean against the
    true sinc curve.
    """
    N = 124  # since we only use 80% for training (~100)
    noiseVariance = 0.2
    dataFunction = sinc
    X_train, X_test, T_train, T_test = initData(N, dataFunction, noiseVariance)

    clf = RVR(X_train, T_train, 'linearSplineKernel',
              useFast=False, betaFixed=False)
    clf.fit()

    print("The relevance vectors (%d):" % len(clf.relevanceVectors))
    print(clf.relevanceVectors)

    # Test-set predictions (mean only; variance discarded here).
    T_pred, _ = clf.predict(X_test)

    # Plot training data against the true function.
    X = np.linspace(-10, 10, 250)  # grid avoids x == 0, so sin(x)/x is safe
    plt.plot(X, np.sin(X) / X, label='Orig. func')
    plt.scatter(X_train, T_train, s=20, label='Training samples', zorder=2)

    # Plot predictive mean over the dense grid.
    predictedMu, _ = clf.predict(X)
    plt.plot(X, predictedMu, label='Pred. func (mean)', dashes=[2, 2])

    # Mark the relevance vectors.
    plt.scatter(clf.relevanceVectors,
                clf.relevanceTargets,
                label="Relevance vectors",
                s=50,
                facecolors="none",
                color="k",
                zorder=10)

    # beta is the noise precision; sigma = sqrt(1 / beta).
    print("Re-estimated sigma:")
    print(np.sqrt(1 / clf.beta))

    plt.ylim(-0.3, 1.1)
    plt.xlabel("x")
    plt.ylabel("t")
    plt.legend()
    # plt.savefig("../plots/sincdataplotnoisy.png", bbox_inches="tight")
    plt.show()
def main():
    """Fit a fast-mode RVR to noisy cosine data and plot the predictions.

    Generates N samples from cos with small noise, fits an RBF-kernel RVR
    using the fast (sequential) algorithm, reports the relevance vectors and
    re-estimated noise, and plots training data, predictions and relevance
    vectors.
    """
    N = 100
    noiseSpread = 0.001

    X_train, X_test, T_train, T_test = initData(N, cos, noiseSpread)

    # Very sensible to initial value of beta (as described in the paper)
    initialBeta = 0.001 ** -2
    model = RVR(X_train, T_train, 'RBFKernel',
                beta=initialBeta,
                useFast=True,
                convergenceThresh=10 ** -2,
                maxIter=500)
    model.fit()

    print("The relevance vectors:")
    print(model.relevanceVectors)
    print("Number of relevance vectors", model.relevanceVectors.shape[0])
    print("Spread:", np.sqrt(1 / model.beta))
    print("beta:", model.beta)

    predictions, _ = model.predict(X_test)

    # Training data.
    plt.scatter(X_train, T_train, s=20, label='Training data')
    # Predictions on held-out points.
    plt.scatter(X_test, predictions, s=20, color='r', label='Predictions')
    # Relevance vectors, drawn as hollow circles.
    plt.scatter(model.relevanceVectors,
                model.relevanceTargets,
                label="Relevance vectors",
                s=50,
                facecolors="none",
                color="k",
                zorder=1)

    plt.xlabel("x")
    plt.ylabel("t")
    plt.legend()
    # plt.savefig("../plots/sincdataplot.png", bbox_inches="tight")
    plt.show()
def main():
    """Fit an RVR to the airfoil dataset and report prediction errors.

    Loads the full airfoil dataset (N=None), fits an RBF-kernel RVR, prints
    mean absolute/relative errors with their standard deviations, and plots
    each input feature against targets, predictions and relevance vectors.
    """
    N = None  # use the whole dataset
    dataFunction = airfoil
    X_train, X_test, T_train, T_test = initData(N, dataFunction)

    clf = RVR(X_train, T_train, 'RBFKernel')
    clf.fit()

    print("Number of relevance vectors:")
    print(len(clf.relevanceVectors))

    T_pred, _ = clf.predict(X_test)

    # Compute the error arrays once instead of re-running a comprehension
    # for every statistic.
    absErrors = np.abs(np.asarray(T_test) - np.asarray(T_pred))
    # Relative to the (signed) true value, matching the original definition.
    relErrors = absErrors / np.asarray(T_test)

    absError = np.mean(absErrors)
    relError = np.mean(relErrors)
    stdAbsError = np.std(absErrors)
    stdRelError = np.std(relErrors)

    print("Mean absolute error: ", absError, " / std: ", stdAbsError)
    print("Mean relative error: ", relError, " / std: ", stdRelError)

    # Plot every feature/data dimension (was hard-coded to 5).
    for i in range(X_train.shape[1]):
        # Training data.
        plt.scatter(X_train[:, i], T_train, s=20, label='Training data')
        # Predictions.
        plt.scatter(X_test[:, i], T_pred, s=20, color='r', label='Predictions')
        # Relevance vectors.
        plt.scatter(clf.relevanceVectors[:, i],
                    clf.relevanceTargets,
                    label="Relevance vectors",
                    s=50,
                    facecolors="none",
                    color="k",
                    zorder=1)

        plt.xlabel("x")
        plt.ylabel("t")
        plt.legend()
        # plt.savefig("../plots/sincdataplot.png", bbox_inches="tight")
        plt.show()
from flask import Flask  # was missing: Flask is used below but never imported
from data import initData
from flask_graphql import GraphQLView
from schema import schema
from mongoengine import connect
from model import Task
# from flask_pymongo import PyMongo

app = Flask(__name__)

# Example query shown for reference; GraphiQL is enabled on /graphql.
# NOTE(review): this constant is not referenced anywhere in this file.
default_query = """{
  allTodo{
    edges {
      node {
        id,
        title,
        description,
        done
      }
    }
  }
}"""

# Expose the GraphQL endpoint with the in-browser GraphiQL IDE enabled.
app.add_url_rule('/graphql',
                 view_func=GraphQLView.as_view('graphql',
                                               schema=schema,
                                               graphiql=True))

if __name__ == "__main__":
    initData()  # seed the database before serving
    app.run(debug=True)
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
"""Main loop of UltraViolet."""

import config
import mailer
import data

import time

# Give the database 5 seconds to come online before touching it.
time.sleep(5)

data.initData()