def model(self, latent_dim: int = 1):
    '''Keras-style layers + optimizer.'''
    self.layers = []
    self.layers.append(Dense(32, activation='tanh'))
    self.layers.append(Dropout(keep_prob=0.9))
    self.layers.append(Dense(16, activation='tanh'))
    self.layers.append(Dropout(keep_prob=0.9))
    self.layers.append(Dense(8, activation='tanh'))
    self.layers.append(Dropout(keep_prob=0.9))
    self.layers.append(Dense(latent_dim))
    self.layers.append(Scale(fixed=True, init_vals=10))  # fixed rescaling of the latent features before the kernel
    self.layers.append(CovMat(kernel='rbf', alpha_fixed=False))
    # self.layers.append(CovMat(kernel='rbf', alpha_fixed=False, alpha=self.noise))  # noise-free alternative
    # optimizer
    self.opt = Adam(1e-3)
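This method only assembles self.layers and self.opt; the enclosing class is not shown. A minimal standalone sketch of the same architecture, assuming dknet's NNRegressor wrapper used in the later examples (its import path is an assumption) and toy 1-D data invented purely for illustration:

import numpy as np

from dknet import NNRegressor  # import path assumed; not shown in the snippets
from dknet.layers import Dense, Dropout, CovMat, Scale
from dknet.optimizers import Adam

np.random.seed(0)
x = np.random.random(size=(100, 1)) - 0.5   # toy inputs, illustrative only
y = np.sin(8.0 * x) + np.random.normal(0.0, 0.01, size=x.shape)

layers = []
layers.append(Dense(32, activation='tanh'))
layers.append(Dropout(keep_prob=0.9))
layers.append(Dense(16, activation='tanh'))
layers.append(Dense(1))                      # latent_dim = 1
layers.append(Scale(fixed=True, init_vals=10))
layers.append(CovMat(kernel='rbf', alpha_fixed=False))

gp = NNRegressor(layers, opt=Adam(1e-3), batch_size=x.shape[0],
                 maxiter=1000, gp=True, verbose=True)
gp.fit(x, y)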
Example #2
import numpy as np

from dknet import NNRegressor  # import path assumed; not shown in the original snippet
from dknet.layers import Dense, CovMat
from dknet.optimizers import Adam, SciPyMin
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, WhiteKernel, ConstantKernel


def Deep_Kernel_Gaussian_Process(x_train, y_train, x_test, y_test):
    layers = []
    layers.append(Dense(64, activation='tanh'))
    layers.append(Dense(64, activation='tanh'))
    layers.append(Dense(20))
    layers.append(CovMat(alpha=0.3, var=1.0, kernel='rbf'))
    # opt = SciPyMin('l-bfgs-b')  # alternative full-batch optimizer (was overwritten below)

    opt = Adam(1e-3)
    batch_size = 500
    gp = NNRegressor(layers,
                     opt=opt,
                     batch_size=batch_size,
                     maxiter=500,
                     gp=True,
                     verbose=True)
    gp.fit(x_train, y_train)

    # Extract the learned mapping z(x) and kernel hyperparameters for use in another learning algorithm.
    alph = gp.layers[-1].s_alpha
    var = gp.layers[-1].var

    A_full = gp.fast_forward(x_train)
    kernel = ConstantKernel(var) * RBF(np.ones(1)) + WhiteKernel(alph)

    A_test = gp.fast_forward(x_test)
    gp1 = GaussianProcessRegressor(kernel, optimizer=None)

    if A_full.shape[0] > 1000:
        data_index = np.arange(0, A_full.shape[0])
        np.random.shuffle(data_index)
        ind = data_index[0:1000]
        gp1.fit(A_full[ind, :], y_train[ind, :])
    else:
        #gp1.fit(A_full[500,:],y_train[500,:])
        gp1.fit(A_full, y_train)

    mu, stdt = gp1.predict(A_test, return_std=True)
    labels = np.rint(mu)
    return labels
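A hedged usage sketch for the function above: it fits the deep-kernel GP, refits an sklearn GP on the learned features, and returns the rounded posterior mean as integer labels. The MNIST loading follows Example #4 below; the subset sizes are illustrative.

import numpy as np

from dknet.utils import load_mnist

(x_tr, y_tr), (x_te, y_te) = load_mnist(shuffle=True)
x_tr = x_tr.reshape(-1, 28 * 28)
x_te = x_te.reshape(-1, 28 * 28)
y_tr = np.argmax(y_tr, 1).reshape(-1, 1)   # one-hot -> integer labels
y_te = np.argmax(y_te, 1).reshape(-1, 1)

labels = Deep_Kernel_Gaussian_Process(x_tr[:3000], y_tr[:3000],
                                      x_te[:500], y_te[:500])
print('accuracy:', np.mean(labels == y_te[:500]))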
Example #3
import numpy as np

from dknet import NNRegressor  # import path assumed; not shown in the original snippet
from dknet.layers import Dense,Dropout,CovMat,Scale
from dknet.optimizers import Adam,SciPyMin,SDProp
from sklearn.gaussian_process import GaussianProcessClassifier,GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF,WhiteKernel,ConstantKernel
def f(x):
	return (x+0.5>=0)*np.sin(64*(x+0.5)**4)

np.random.seed(0)
x_train=np.random.random(size=(70,1))-0.5
y_train=f(x_train)+np.random.normal(0.0,0.01,size=x_train.shape)



layers=[]
#layers.append(Dense(64,activation='tanh'))
#layers.append(Dropout(0.99))
layers.append(Dense(6,activation='tanh'))
layers.append(Dropout(0.99))
layers.append(Dense(1))
layers.append(Scale(fixed=True,init_vals=64.0))
layers.append(CovMat(kernel='rbf',alpha_fixed=False))

opt=Adam(1e-3)
#opt=SciPyMin('l-bfgs-b')

gp=NNRegressor(layers,opt=opt,batch_size=x_train.shape[0],maxiter=10000,gp=True,verbose=True)
gp.fit(x_train,y_train)
#print(gp.grad_check(x_train[0:10],y_train[0:10]))
x_test=np.linspace(-0.7,0.7,1000).reshape(-1,1)
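The snippet stops after defining x_test. A minimal continuation under the same assumptions as Example #2 (the CovMat layer exposes s_alpha and var, and fast_forward maps inputs through the learned network):

# Map inputs through the learned feature map z(x).
A_train = gp.fast_forward(x_train)
A_test = gp.fast_forward(x_test)

# Rebuild an equivalent sklearn GP in feature space, as in Example #2.
alph = gp.layers[-1].s_alpha
var = gp.layers[-1].var
kernel = ConstantKernel(var) * RBF(np.ones(1)) + WhiteKernel(alph)
gp1 = GaussianProcessRegressor(kernel, optimizer=None)
gp1.fit(A_train, y_train)

mu, std = gp1.predict(A_test, return_std=True)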


Example #4
import numpy as np

from dknet import NNRegressor  # import path assumed; not shown in the original snippet
from dknet.layers import Dense,Conv2D,MaxPool2D,Flatten,Dropout,CovMat,Scale
from dknet.optimizers import Adam,SciPyMin,SDProp, Adam2
from dknet.utils import load_mnist

from sklearn.gaussian_process import GaussianProcessClassifier,GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF,WhiteKernel,ConstantKernel

(x_train,y_train),(x_test,y_test)=load_mnist(shuffle=True)
x_train=x_train.reshape(-1,28*28)
x_test=x_test.reshape(-1,28*28)

y_test=np.argmax(y_test,1).reshape(-1,1)
y_train=np.argmax(y_train,1).reshape(-1,1)

layers=[]
layers.append(Dense(64,activation='tanh'))
layers.append(Dense(64,activation='tanh'))
layers.append(Dense(20))
layers.append(CovMat(alpha=0.3,var=1.0,kernel='rbf'))
#opt=SciPyMin('l-bfgs-b')  # alternative optimizer; Adam is used below
n_train = 3000
n_test = 100

opt=Adam(1e-3)
batch_size=500
gp=NNRegressor(layers,opt=opt,batch_size=batch_size,maxiter=500,gp=True,verbose=True)
gp.fit(x_train[:n_train],y_train[:n_train])



# Can extract the mapping z(x) and hyperparameters for use in another learning algorithm:
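The comment above is where the original snippet ends; the extraction it refers to, following the same pattern as Example #2:

alph = gp.layers[-1].s_alpha
var = gp.layers[-1].var

A_train = gp.fast_forward(x_train[:n_train])
A_test = gp.fast_forward(x_test[:n_test])

kernel = ConstantKernel(var) * RBF(np.ones(1)) + WhiteKernel(alph)
gp1 = GaussianProcessRegressor(kernel, optimizer=None)
gp1.fit(A_train, y_train[:n_train])

mu, std = gp1.predict(A_test, return_std=True)
print('MNIST accuracy:', np.mean(np.rint(mu) == y_test[:n_test]))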
Example #5
import numpy as np

from dknet import NNRegressor  # import path assumed; not shown in the original snippet
from dknet.layers import Dense, Dropout, CovMat
from dknet.optimizers import Adam

np.random.seed(1)
data = np.loadtxt('ltus_en.txt')
train_data = data[:1000]
test_full_data = data[1000:]
np.random.shuffle(test_full_data)
test_data = test_full_data[:1000]

which_qs = [2, 3]
x_train = train_data[:, which_qs]
y_train = train_data[:, [-1]]
x_test = test_data[:, which_qs]
y_test = test_data[:, [-1]]

layers = []
n_out = 2
layers.append(Dense(100, activation='lrelu'))
#layers.append(Dropout(0.8))
layers.append(Dense(100, activation='lrelu'))
#layers.append(Dropout(0.8))
#layers.append(Dense(50,activation='lrelu'))
layers.append(Dense(n_out))
layers.append(CovMat(kernel='rbf', alpha_fixed=False))

opt = Adam(1e-4)

gp = NNRegressor(layers,
                 opt=opt,
                 batch_size=50,
                 maxiter=4000,
                 gp=True,
                 verbose=False)
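The snippet ends after constructing the regressor; fitting and a held-out check would follow the same pattern as the other examples. The evaluation below is an illustrative addition that reuses Example #2's feature-space GP:

from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF, WhiteKernel, ConstantKernel

gp.fit(x_train, y_train)

# Rebuild an equivalent sklearn GP on the learned features, as in Example #2.
alph = gp.layers[-1].s_alpha
var = gp.layers[-1].var
kernel = ConstantKernel(var) * RBF(np.ones(1)) + WhiteKernel(alph)
gp1 = GaussianProcessRegressor(kernel, optimizer=None)
gp1.fit(gp.fast_forward(x_train), y_train)

mu, std = gp1.predict(gp.fast_forward(x_test), return_std=True)
print('test RMSE:', np.sqrt(np.mean((mu - y_test) ** 2)))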