def SmoothKRR():
    """Fit Gaussian-kernel ridge regression to smoothed stock data and plot it.

    Reads column 1 of a tab-separated stock file, smooths it with the
    module-level ``smooth`` helper, trains ``mlpy.KernelRidge`` on a
    precomputed Gaussian kernel, prints the predictions and shows a
    step plot of targets vs. predictions.  Returns ``None``; the plot
    window is a side effect.
    """
    # Raw string: the Windows path contains backslashes that must not be
    # treated as escape sequences (same value, but no DeprecationWarning).
    y = np.genfromtxt(r'F:\PY\data\stock.txt', skip_header=1, dtype=None,
                      delimiter='\t', usecols=(1))
    targetValues = smooth(y, len(y))
    np.random.seed(10)  # reproducible run-to-run output
    trainingPoints = np.arange(28).reshape(-1, 1)
    testPoints = np.arange(29).reshape(-1, 1)  # one step past the training range
    # Precompute train/train and test/train kernel matrices; kernel=None
    # tells KernelRidge that learn()/pred() receive kernel matrices directly.
    knl = mlpy.kernel_gaussian(trainingPoints, trainingPoints, sigma=1)
    knlTest = mlpy.kernel_gaussian(testPoints, trainingPoints, sigma=1)
    knlRidge = mlpy.KernelRidge(lmb=0.01, kernel=None)
    knlRidge.learn(knl, targetValues)
    resultPoints = knlRidge.pred(knlTest)
    # print() works identically under Python 2 and 3 for a single argument,
    # and matches the py3-style snippets elsewhere in this file.
    print(resultPoints)
    plt.step(trainingPoints, targetValues, 'o')
    plt.step(testPoints, resultPoints)
    plt.show()
def _q20():
    """Grid-search Gaussian-kernel ridge regression over (gamma, lambda).

    For each kernel width ``gamma`` and regularization ``lamda``, trains
    ``mlpy.KernelRidge`` on the dataset returned by ``load_dataset`` and
    prints ``gamma``, ``lamda`` and the 0/1 test error rate.
    """
    import mlpy
    X_train, y_train, X_test, y_test = load_dataset()
    for gamma in [32, 2, 0.125]:
        # mlpy parameterizes the Gaussian kernel by sigma; convert from gamma.
        rbf = mlpy.KernelGaussian(sigma=(2 * gamma) ** 0.5)
        for lamda in [0.001, 1, 1000]:
            clf = mlpy.KernelRidge(lmb=lamda, kernel=rbf)
            clf.learn(X_train, y_train)
            # Count misclassified test samples: sign of prediction vs. label.
            err = sum(
                1.0
                for xi, yi in zip(X_test, y_test)
                if sign(clf.pred(xi)) != yi
            )
            print(gamma, lamda, err / len(X_test))
def f(TrainIn, TrainOut, TestIn):
    """Train Gaussian-kernel ridge regression and predict the test inputs.

    Parameters
    ----------
    TrainIn : array-like
        Training inputs, one sample per row.
    TrainOut : array-like
        Training target values.
    TestIn : array-like
        Inputs to predict.

    Returns
    -------
    numpy.ndarray
        Predictions for ``TestIn``.
    """
    print("init......")
    x = numpy.array(TrainIn)
    y = numpy.array(TrainOut)
    t = numpy.array(TestIn)
    print("learn......")
    # Precomputed kernel matrices: train/train for learning, test/train
    # for prediction.
    k = mlpy.kernel_gaussian(x, x, sigma=1)
    kt = mlpy.kernel_gaussian(t, x, sigma=1)
    krr = mlpy.KernelRidge(lmb=0.01)
    krr.learn(k, y)
    print("out......")
    # Fix: the original passed the raw test points ``t`` to pred() and left
    # ``kt`` unused; with a precomputed training kernel, pred() must receive
    # the test kernel matrix (as the sibling snippets in this file do).
    result = krr.pred(kt)
    return result
# NOTE(review): the first two statements are the tail of a smoothing function
# whose ``def`` line lies outside this excerpt (``w``, ``s`` and
# ``window_len`` are its locals) — kept verbatim.
y = np.convolve(w / w.sum(), s, mode='same')
return y[window_len:-window_len + 1]

# --- script: Gaussian kernel ridge regression on smoothed gold prices ---
# Column 1 of a comma-separated file; the header row is skipped.
y = np.genfromtxt("Gold.csv", skip_header=1, dtype=None,
                  delimiter=',', usecols=(1))
targetValues = smooth(y, len(y))
np.random.seed(10)  # reproducible output
trainingPoints = np.arange(125).reshape(-1, 1)
testPoints = np.arange(126).reshape(-1, 1)  # one point past the training range
# Precomputed kernel matrices; kernel=None tells KernelRidge that
# learn()/pred() receive kernel matrices directly.
knl = mlpy.kernel_gaussian(trainingPoints, trainingPoints, sigma=1)
knlTest = mlpy.kernel_gaussian(testPoints, trainingPoints, sigma=1)
knlRidge = mlpy.KernelRidge(lmb=0.01, kernel=None)
knlRidge.learn(knl, targetValues)
resultPoints = knlRidge.pred(knlTest)
print(resultPoints)
# Step plot: smoothed targets as markers, predictions as a line.
plt.step(trainingPoints, targetValues, 'o')
plt.step(testPoints, resultPoints)
plt.show()
import numpy as np
import matplotlib.pyplot as plt
import mlpy

# Demo: kernel ridge regression fit of noisy samples of exp(x).
np.random.seed(0)  # reproducible noise
x = np.arange(0, 2, 0.05).reshape(-1, 1)  # training points (column vector)
# Target values: exp(x) plus Gaussian noise (mean 1, std 0.2).
y = np.ravel(np.exp(x)) + np.random.normal(1, 0.2, x.shape[0])
xt = np.arange(0, 2, 0.01).reshape(-1, 1)  # denser grid of testing points
K = mlpy.kernel_gaussian(x, x, sigma=1)  # training kernel matrix
Kt = mlpy.kernel_gaussian(xt, x, sigma=1)  # testing kernel matrix
krr = mlpy.KernelRidge(lmb=0.01)
krr.learn(K, y)
yt = krr.pred(Kt)
# The figure/line handles returned by pyplot were previously bound to
# unused locals (fig, plot1, plot2); discard them instead.
plt.figure(1)
plt.plot(x[:, 0], y, 'o')
plt.plot(xt[:, 0], yt)
plt.show()
# Build next-token data from ``sentences``: for each sentence, ``source``
# gets the per-token vectors and ``target`` the vectors of the following
# tokens (``lut`` presumably maps a token to its vector — TODO confirm).
source = []
target = []
for s in sentences:
    words = []
    nwords = []
    for k in range(len(s) - 1):
        words.append(lut[s[k]])
        nwords.append(lut[s[k + 1]])
    source.append(np.r_[words])
    # NOTE(review): ``extend`` here vs. ``append`` above — targets are
    # accumulated as one flat sequence across sentences; looks deliberate.
    target.extend(np.r_[nwords])
# Warm up the reservoir on the first 10 sequences and remember its final
# state so every sequence below starts from the same initial state.
for b in source[:10]:
    reservoir.execute(b)
initial_state = reservoir.states[-1]
states = []
for k in range(len(source)):
    # Reset the reservoir to the warmed-up state before each sequence.
    reservoir.states = np.c_[initial_state].T
    states.append(reservoir.execute(source[k]))
X = np.vstack(states)  # one row per reservoir state
import mlpy
# Kernel ridge readout over the reservoir states.
K = mlpy.kernel_gaussian(X.T, X.T, sigma=1)
readout = mlpy.KernelRidge(lmb=0.01)
readout.learn(K, np.array(target)[:, 0])  # fit only the first target dimension
# NOTE(review): pred() receives the raw states ``X`` rather than a test
# kernel matrix — confirm this matches the intended mlpy usage.
kernel_distrib = readout.pred(X)
# NOTE(review): the first two statements are the tail of a smoothing function
# whose ``def`` line lies outside this excerpt (``w``, ``s`` and
# ``window_len`` are its locals) — kept verbatim.
y = np.convolve(w / w.sum(), s, mode='same')
return y[window_len:-window_len + 1]

# --- script: Gaussian kernel ridge regression on smoothed gold prices ---
# Column 1 of a comma-separated file; the header row is skipped.
y = np.genfromtxt("/Users/hanzhao/PycharmProjects/MLstudy/file/Gold.csv",
                  skip_header=1, dtype=None, delimiter=',', usecols=(1))
targetValues = smooth(y, len(y))
np.random.seed(10)  # reproducible output
trainingPoints = np.arange(125).reshape(-1, 1)
testPoints = np.arange(126).reshape(-1, 1)  # one point past the training range
# Kernel matrices built via the KernelGaussian object API (default sigma).
kg = KernelGaussian()
knl = kg.kernel(trainingPoints, trainingPoints)
knlTest = kg.kernel(testPoints, trainingPoints)
# NOTE(review): unlike the sibling Gold.csv snippet, no ``lmb`` is passed,
# so KernelRidge uses its default regularization — confirm intended.
knlRidge = mlpy.KernelRidge(kernel=None)
knlRidge.learn(knl, targetValues)
resultPoints = knlRidge.pred(knlTest)
print(resultPoints)
# Step plot: smoothed targets as markers, predictions as a line.
plt.step(trainingPoints, targetValues, 'o')
plt.step(testPoints, resultPoints)
plt.show()