def model(self, latent_dim: int = 1):
    '''Keras-style stack of layers plus an optimizer.'''
    self.layers = []
    self.layers.append(Dense(32, activation='tanh'))
    self.layers.append(Dropout(keep_prob=0.9))
    self.layers.append(Dense(16, activation='tanh'))
    self.layers.append(Dropout(keep_prob=0.9))
    self.layers.append(Dense(8, activation='tanh'))
    self.layers.append(Dropout(keep_prob=0.9))
    self.layers.append(Dense(latent_dim))
    # Fixed scaling stretches the latent output so the RBF kernel sees a
    # useful length scale.
    self.layers.append(Scale(fixed=True, init_vals=10))
    self.layers.append(CovMat(kernel='rbf', alpha_fixed=False))
    # self.layers.append(CovMat(kernel='rbf', alpha_fixed=False, alpha=self.noise))  # noise free
    # optimizer
    self.opt = Adam(1e-3)
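These snippets assume the layer, kernel, and optimizer classes are already in scope. A plausible import block, assuming the dknet package layout from maka89's deep-kernel-gp (an assumption; adjust to your installation), would be:

import numpy as np
# Hypothetical imports, assuming the dknet package layout.
from dknet import NNRegressor
from dknet.layers import Dense, Dropout, Scale, CovMat
from dknet.optimizers import Adam, SciPyMin
# scikit-learn and matplotlib pieces used in the later examples:
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import ConstantKernel, RBF, WhiteKernel
import matplotlib.pyplot as plt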
Example 2
def Deep_Kernel_Gaussian_Process(x_train, y_train, x_test, y_test):
    layers = []
    layers.append(Dense(64, activation='tanh'))
    layers.append(Dense(64, activation='tanh'))
    layers.append(Dense(20))
    layers.append(CovMat(alpha=0.3, var=1.0, kernel='rbf'))
    # opt = SciPyMin('l-bfgs-b')  # alternative: SciPy L-BFGS-B optimizer
    opt = Adam(1e-3)
    batch_size = 500
    gp = NNRegressor(layers,
                     opt=opt,
                     batch_size=batch_size,
                     maxiter=500,
                     gp=True,
                     verbose=True)
    gp.fit(x_train, y_train)

    # The learned mapping z(x) and kernel hyperparameters can be
    # extracted for use in another learning algorithm.
    alph = gp.layers[-1].s_alpha
    var = gp.layers[-1].var

    # Push the data through the network and rebuild the learned kernel
    # as a scikit-learn kernel on the latent features.
    A_full = gp.fast_forward(x_train)
    kernel = ConstantKernel(var) * RBF(np.ones(1)) + WhiteKernel(alph)

    A_test = gp.fast_forward(x_test)
    gp1 = GaussianProcessRegressor(kernel, optimizer=None)

    # Exact GP inference scales cubically with the number of points,
    # so fit on at most 1000 randomly chosen training examples.
    if A_full.shape[0] > 1000:
        data_index = np.arange(0, A_full.shape[0])
        np.random.shuffle(data_index)
        ind = data_index[0:1000]
        gp1.fit(A_full[ind, :], y_train[ind, :])
    else:
        gp1.fit(A_full, y_train)

    # Round the predictive mean to the nearest integer to produce labels.
    mu, stdt = gp1.predict(A_test, return_std=True)
    labels = np.rint(mu)
    return labels
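Note that y_test is accepted but never used inside the function, so evaluation is left to the caller. A minimal, hypothetical usage sketch (the accuracy check is an assumption, not part of the source):

# Hypothetical caller: score the rounded predictions against the held-out labels.
labels = Deep_Kernel_Gaussian_Process(x_train, y_train, x_test, y_test)
accuracy = np.mean(labels.ravel() == y_test.ravel())
print('test accuracy: %.3f' % accuracy)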
Example 3
np.random.seed(0)
x_train = np.random.random(size=(70, 1)) - 0.5
# f is the target function defined earlier in the script
y_train = f(x_train) + np.random.normal(0.0, 0.01, size=x_train.shape)

layers = []
# layers.append(Dense(64, activation='tanh'))
# layers.append(Dropout(0.99))
layers.append(Dense(6, activation='tanh'))
layers.append(Dropout(0.99))
layers.append(Dense(1))
layers.append(Scale(fixed=True, init_vals=64.0))
layers.append(CovMat(kernel='rbf', alpha_fixed=False))

opt = Adam(1e-3)
# opt = SciPyMin('l-bfgs-b')

gp = NNRegressor(layers, opt=opt, batch_size=x_train.shape[0],
                 maxiter=10000, gp=True, verbose=True)
gp.fit(x_train, y_train)
# print(gp.grad_check(x_train[0:10], y_train[0:10]))

x_test = np.linspace(-0.7, 0.7, 1000).reshape(-1, 1)
y_pred, std = gp.predict(x_test)

# Plot the learned one-dimensional latent mapping z(x), i.e. the output
# of the Scale layer just before the kernel.
plt.plot(x_test, gp.layers[-2].out)
plt.xlabel('X')
plt.ylabel('Z')
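The predictive mean and standard deviation are computed above but never drawn; a minimal continuation (a sketch, assuming y_pred and std each cover the same x_test grid) that visualizes the fit:

# Sketch: plot the GP fit with a +/- 2 standard deviation band.
mean = y_pred.ravel()
s = std.ravel()
plt.figure()
plt.plot(x_train, y_train, 'k.', label='training data')
plt.plot(x_test, mean, 'b-', label='predictive mean')
plt.fill_between(x_test.ravel(), mean - 2.0 * s, mean + 2.0 * s,
                 alpha=0.3, label='+/- 2 std')
plt.legend()
plt.show()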
Example 4
# which_qs selects the input feature columns; the last column is the target.
x_train = train_data[:, which_qs]
y_train = train_data[:, [-1]]
x_test = test_data[:, which_qs]
y_test = test_data[:, [-1]]

layers = []
n_out = 2
layers.append(Dense(100, activation='lrelu'))
#layers.append(Dropout(0.8))
layers.append(Dense(100, activation='lrelu'))
#layers.append(Dropout(0.8))
#layers.append(Dense(50,activation='lrelu'))
layers.append(Dense(n_out))
layers.append(CovMat(kernel='rbf', alpha_fixed=False))

opt = Adam(1e-4)

gp = NNRegressor(layers,
                 opt=opt,
                 batch_size=50,
                 maxiter=4000,
                 gp=True,
                 verbose=False)
gp.fit(x_train, y_train)

if len(which_qs) > 2 or n_out > 2 or True:  # 'or True' forces this branch to always run
    ytr_pred, std = gp.predict(x_train)
    ytestpred, std = gp.predict(x_test)
    ydumb = np.mean(y_train)  # constant baseline: mean of the training targets

    mse_train = np.mean((ytr_pred - y_train)**2)
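The snippet stops at the training error; a natural continuation (an assumption, following the same pattern) would compare the test error against the constant baseline:

    # Sketch: compare test MSE against the mean-of-training baseline.
    mse_test = np.mean((ytestpred - y_test)**2)
    mse_dumb = np.mean((ydumb - y_test)**2)
    print('train %.4f | test %.4f | baseline %.4f' % (mse_train, mse_test, mse_dumb))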