def __init__(self, sigma_f=1, lengthscale=None, sigma_n=1, covfunc=None):
    """Build a small 2-D-input net (smaller than the net above) feeding a GP.

    Args:
        sigma_f: GP signal-variance hyperparameter.
        lengthscale: list of GP lengthscales; defaults to ``[1]``.
        sigma_n: GP noise hyperparameter.
        covfunc: covariance function; defaults to a fresh squared-exponential.
    """
    super(gpnet2_1_2, self).__init__()
    # Sentinel defaults: a list / covfunc instance placed directly in the
    # signature would be one shared object across every default-constructed
    # model (classic mutable-default pitfall).
    if lengthscale is None:
        lengthscale = [1]
    if covfunc is None:
        covfunc = gprh.covfunc(type='se')
    # 2 -> 40 -> 6 -> 1 feature extractor with batch-norm + tanh activations.
    self.mynet = nn.Sequential(
        nn.Linear(2, 40),
        nn.BatchNorm1d(40),
        nn.Tanh(),
        nn.Linear(40, 6),
        nn.BatchNorm1d(6),
        nn.Tanh(),
        nn.Linear(6, 1),
    )
    self.gp = gprh.GP_new(sigma_f=sigma_f, lengthscale=lengthscale,
                          sigma_n=sigma_n, covfunc=covfunc)
    self.npar = numel(self)   # total number of trainable parameters
    self.pureGP = False       # this model has a net in front of the GP
def __init__(self, sigma_f=1, lengthscale=None, sigma_n=1, covfunc=None):
    """Build two separate 1-D-input nets whose outputs feed a 2-D GP.

    Args:
        sigma_f: GP signal-variance hyperparameter.
        lengthscale: list of GP lengthscales; defaults to ``[1, 1]``.
        sigma_n: GP noise hyperparameter.
        covfunc: covariance function; defaults to a fresh squared-exponential.
    """
    super(gpnet1_2_1, self).__init__()
    # Sentinel defaults avoid sharing one list / covfunc object across
    # all default-constructed instances.
    if lengthscale is None:
        lengthscale = [1, 1]
    if covfunc is None:
        covfunc = gprh.covfunc(type='se')
    # First latent output: bounded by Sigmoid, then rescaled by a
    # bias-free linear layer.
    self.mynet1 = nn.Sequential(
        nn.Linear(1, 30), nn.Tanh(),
        nn.Linear(30, 30), nn.Tanh(),
        nn.Linear(30, 6), nn.Tanh(),
        nn.Linear(6, 1), nn.Sigmoid(),
        nn.Linear(1, 1, bias=False),
    )
    # Second latent output: plain unbounded head.
    self.mynet2 = nn.Sequential(
        nn.Linear(1, 30), nn.Tanh(),
        nn.Linear(30, 6), nn.Tanh(),
        nn.Linear(6, 1),
    )
    self.gp = gprh.GP_new(sigma_f=sigma_f, lengthscale=lengthscale,
                          sigma_n=sigma_n, covfunc=covfunc)
    self.npar = numel(self)   # total number of trainable parameters
    self.pureGP = False
def __init__(self, sigma_f=1, lengthscale=None, sigma_n=1, covfunc=None):
    """Build one 1-D-input net plus an identity second branch, feeding a GP.

    Args:
        sigma_f: GP signal-variance hyperparameter.
        lengthscale: list of GP lengthscales; defaults to ``[1, 1]``.
        sigma_n: GP noise hyperparameter.
        covfunc: covariance function; defaults to a fresh squared-exponential.
    """
    super(gpnet1_2_2, self).__init__()
    # Sentinel defaults avoid sharing one list / covfunc object across
    # all default-constructed instances.
    if lengthscale is None:
        lengthscale = [1, 1]
    if covfunc is None:
        covfunc = gprh.covfunc(type='se')
    self.mynet1 = nn.Sequential(
        nn.Linear(1, 30), nn.Tanh(),
        nn.Linear(30, 20), nn.Tanh(),
        nn.Linear(20, 10), nn.Sigmoid(),
        nn.Linear(10, 1),
    )
    # Intentionally empty Sequential: the second branch is the identity
    # (all its layers were disabled in the original source).
    self.mynet2 = nn.Sequential()
    self.gp = gprh.GP_new(sigma_f=sigma_f, lengthscale=lengthscale,
                          sigma_n=sigma_n, covfunc=covfunc)
    self.npar = numel(self)   # total number of trainable parameters
    self.pureGP = False
def __init__(self, sigma_f=1, lengthscale=None, sigma_n=1, covfunc=None):
    """Build a deep 2-D-input net with three unbounded outputs, feeding a GP.

    Three outputs of the same net, no sigmoid on the outputs.

    Args:
        sigma_f: GP signal-variance hyperparameter.
        lengthscale: list of GP lengthscales; defaults to ``[1, 1, 1]``.
        sigma_n: GP noise hyperparameter.
        covfunc: covariance function; defaults to a fresh squared-exponential.
    """
    super(gpnet2_3_1, self).__init__()
    # Sentinel defaults avoid sharing one list / covfunc object across
    # all default-constructed instances.
    if lengthscale is None:
        lengthscale = [1, 1, 1]
    if covfunc is None:
        covfunc = gprh.covfunc(type='se')
    self.mynet = nn.Sequential(
        nn.Linear(2, 20), nn.Tanh(),
        nn.Linear(20, 50), nn.Tanh(),
        nn.Linear(50, 50), nn.Tanh(),
        nn.Linear(50, 50), nn.Tanh(),
        nn.Linear(50, 50), nn.Tanh(),
        nn.Linear(50, 50), nn.Tanh(),
        nn.Linear(50, 20), nn.Tanh(),
        nn.Linear(20, 3),
    )
    self.gp = gprh.GP_new(sigma_f=sigma_f, lengthscale=lengthscale,
                          sigma_n=sigma_n, covfunc=covfunc)
    self.npar = numel(self)   # total number of trainable parameters
    self.pureGP = False
def __init__(self, sigma_f=1, lengthscale=None, sigma_n=1, covfunc=None):
    """Build a 2-D-input, 2-output net (as net 2_2_4 but with more neurons).

    Args:
        sigma_f: GP signal-variance hyperparameter.
        lengthscale: list of GP lengthscales; defaults to ``[1, 1]``.
        sigma_n: GP noise hyperparameter.
        covfunc: covariance function; defaults to a fresh squared-exponential.
    """
    super(gpnet2_2_6, self).__init__()
    # Sentinel defaults avoid sharing one list / covfunc object across
    # all default-constructed instances.
    if lengthscale is None:
        lengthscale = [1, 1]
    if covfunc is None:
        covfunc = gprh.covfunc(type='se')
    self.mynet = nn.Sequential(
        nn.Linear(2, 20), nn.Tanh(),
        nn.Linear(20, 70), nn.Tanh(),
        nn.Linear(70, 70), nn.Tanh(),
        nn.Linear(70, 70), nn.Tanh(),
        nn.Linear(70, 70), nn.Tanh(),
        nn.Linear(70, 70), nn.Tanh(),
        nn.Linear(70, 20), nn.Tanh(),
        nn.Linear(20, 2),
    )
    self.gp = gprh.GP_new(sigma_f=sigma_f, lengthscale=lengthscale,
                          sigma_n=sigma_n, covfunc=covfunc)
    self.npar = numel(self)   # total number of trainable parameters
    self.pureGP = False
def __init__(self, sigma_f=1, lengthscale=1, sigma_n=1, covfunc=None):
    """Build a small 1-D-input, 1-output net feeding a GP.

    Args:
        sigma_f: GP signal-variance hyperparameter.
        lengthscale: scalar GP lengthscale (wrapped into a list for the GP).
        sigma_n: GP noise hyperparameter.
        covfunc: covariance function; defaults to a fresh squared-exponential.
    """
    super(gpnet1_1_3, self).__init__()
    # Sentinel default: a covfunc instance in the signature would be one
    # shared object across every default-constructed model.
    if covfunc is None:
        covfunc = gprh.covfunc(type='se')
    self.mynet = nn.Sequential(
        nn.Linear(1, 12),
        nn.Tanh(),
        nn.Linear(12, 6),
        nn.Sigmoid(),
        nn.Linear(6, 1),
    )
    # The GP expects one lengthscale per latent dimension, hence the list.
    self.gp = gprh.GP_new(sigma_f=sigma_f, lengthscale=[lengthscale],
                          sigma_n=sigma_n, covfunc=covfunc)
    self.npar = numel(self)   # total number of trainable parameters
    self.pureGP = False
def __init__(self, sigma_f=1, lengthscale=None, sigma_n=1, covfunc=None):
    """Build two different 2-D-input nets: one with a sigmoid-bounded head.

    Args:
        sigma_f: GP signal-variance hyperparameter.
        lengthscale: list of GP lengthscales; defaults to ``[1, 1]``.
        sigma_n: GP noise hyperparameter.
        covfunc: covariance function; defaults to a fresh squared-exponential.
    """
    super(gpnet2_2_2, self).__init__()
    # Sentinel defaults avoid sharing one list / covfunc object across
    # all default-constructed instances.
    if lengthscale is None:
        lengthscale = [1, 1]
    if covfunc is None:
        covfunc = gprh.covfunc(type='se')
    # Branch 1: sigmoid before the final linear layer.
    self.mynet1 = nn.Sequential(
        nn.Linear(2, 40), nn.Tanh(),
        nn.Linear(40, 20), nn.Tanh(),
        nn.Linear(20, 10), nn.Tanh(),
        nn.Linear(10, 5), nn.Sigmoid(),
        nn.Linear(5, 1),
    )
    # Branch 2: plain tanh net.
    self.mynet2 = nn.Sequential(
        nn.Linear(2, 20), nn.Tanh(),
        nn.Linear(20, 10), nn.Tanh(),
        nn.Linear(10, 1),
    )
    self.gp = gprh.GP_new(sigma_f=sigma_f, lengthscale=lengthscale,
                          sigma_n=sigma_n, covfunc=covfunc)
    self.npar = numel(self)   # total number of trainable parameters
    self.pureGP = False
def __init__(self, sigma_f=1, lengthscale=None, sigma_n=1, covfunc=None):
    """Build a deep 2-D-input net (deeper than the previous net), feeding a GP.

    Args:
        sigma_f: GP signal-variance hyperparameter.
        lengthscale: list of GP lengthscales; defaults to ``[1]``.
        sigma_n: GP noise hyperparameter.
        covfunc: covariance function; defaults to a fresh squared-exponential.
    """
    super(gpnet2_1_4, self).__init__()
    # Sentinel defaults avoid sharing one list / covfunc object across
    # all default-constructed instances.
    if lengthscale is None:
        lengthscale = [1]
    if covfunc is None:
        covfunc = gprh.covfunc(type='se')
    # 2 -> 90, ten identical 90 -> 90 tanh layers, then 90 -> 1.
    layers = [nn.Linear(2, 90), nn.Tanh()]
    for _ in range(10):
        layers += [nn.Linear(90, 90), nn.Tanh()]
    layers.append(nn.Linear(90, 1))
    self.mynet = nn.Sequential(*layers)
    self.gp = gprh.GP_new(sigma_f=sigma_f, lengthscale=lengthscale,
                          sigma_n=sigma_n, covfunc=covfunc)
    self.npar = numel(self)   # total number of trainable parameters
    self.pureGP = False
def __init__(self, sigma_f=1, lengthscale=None, sigma_n=1, covfunc=None):
    """Build a 2-D-input net (slightly fewer neurons than previous), feeding a GP.

    Args:
        sigma_f: GP signal-variance hyperparameter.
        lengthscale: list of GP lengthscales; defaults to ``[1]``.
        sigma_n: GP noise hyperparameter.
        covfunc: covariance function; defaults to a fresh squared-exponential.
    """
    super(gpnet2_1_5, self).__init__()
    # Sentinel defaults avoid sharing one list / covfunc object across
    # all default-constructed instances.
    if lengthscale is None:
        lengthscale = [1]
    if covfunc is None:
        covfunc = gprh.covfunc(type='se')
    self.mynet = nn.Sequential(
        nn.Linear(2, 30), nn.Tanh(),
        nn.Linear(30, 30), nn.Tanh(),
        nn.Linear(30, 50), nn.Tanh(),
        nn.Linear(50, 50), nn.Tanh(),
        nn.Linear(50, 30), nn.Tanh(),
        nn.Linear(30, 30), nn.Tanh(),
        nn.Linear(30, 30), nn.Tanh(),
        nn.Linear(30, 1),
    )
    self.gp = gprh.GP_new(sigma_f=sigma_f, lengthscale=lengthscale,
                          sigma_n=sigma_n, covfunc=covfunc)
    self.npar = numel(self)   # total number of trainable parameters
    self.pureGP = False
def __init__(self, sigma_f=1, lengthscale=None, sigma_n=1, covfunc=None):
    """Build a wide, tapering 2-D-input net (larger than previous), feeding a GP.

    Args:
        sigma_f: GP signal-variance hyperparameter.
        lengthscale: list of GP lengthscales; defaults to ``[1]``.
        sigma_n: GP noise hyperparameter.
        covfunc: covariance function; defaults to a fresh squared-exponential.
    """
    super(gpnet2_1_9, self).__init__()
    # Sentinel defaults avoid sharing one list / covfunc object across
    # all default-constructed instances.
    if lengthscale is None:
        lengthscale = [1]
    if covfunc is None:
        covfunc = gprh.covfunc(type='se')
    # Geometrically tapering widths (~0.8 factor per layer), tanh between
    # each pair, then a final 40 -> 1 head.
    widths = [2, 500, 400, 320, 256, 204, 162, 131, 104, 83, 66, 40]
    layers = []
    for n_in, n_out in zip(widths[:-1], widths[1:]):
        layers += [nn.Linear(n_in, n_out), nn.Tanh()]
    layers.append(nn.Linear(40, 1))
    self.mynet = nn.Sequential(*layers)
    self.gp = gprh.GP_new(sigma_f=sigma_f, lengthscale=lengthscale,
                          sigma_n=sigma_n, covfunc=covfunc)
    self.npar = numel(self)   # total number of trainable parameters
    self.pureGP = False
def __init__(self, sigma_f=1, lengthscale=None, sigma_n=1, covfunc=None):
    """Build a 2-D-input, single-output tanh net feeding a GP.

    NOTE(review): the original docstring said "two outputs of same net,
    with sigmoid output", but the net below has a single output and no
    sigmoid — presumably copied from a sibling class; confirm intent.

    Args:
        sigma_f: GP signal-variance hyperparameter.
        lengthscale: list of GP lengthscales; defaults to ``[1]``.
        sigma_n: GP noise hyperparameter.
        covfunc: covariance function; defaults to a fresh squared-exponential.
    """
    super(gpnet2_1_11, self).__init__()
    # Sentinel defaults avoid sharing one list / covfunc object across
    # all default-constructed instances.
    if lengthscale is None:
        lengthscale = [1]
    if covfunc is None:
        covfunc = gprh.covfunc(type='se')
    self.mynet = nn.Sequential(
        nn.Linear(2, 60), nn.Tanh(),
        nn.Linear(60, 30), nn.Tanh(),
        nn.Linear(30, 20), nn.Tanh(),
        nn.Linear(20, 6), nn.Tanh(),
        nn.Linear(6, 1),
    )
    self.gp = gprh.GP_new(sigma_f=sigma_f, lengthscale=lengthscale,
                          sigma_n=sigma_n, covfunc=covfunc)
    self.npar = numel(self)   # total number of trainable parameters
    self.pureGP = False
def __init__(self, sigma_f=1, lengthscale=1, sigma_n=1, covfunc=None):
    """Build a small 1-D-input, 1-output tanh net feeding a GP.

    Args:
        sigma_f: GP signal-variance hyperparameter.
        lengthscale: scalar GP lengthscale (wrapped into a list for the GP).
        sigma_n: GP noise hyperparameter.
        covfunc: covariance function; defaults to a fresh squared-exponential.
    """
    super(gpnet1_1_2, self).__init__()
    # Sentinel default: a covfunc instance in the signature would be one
    # shared object across every default-constructed model.
    if covfunc is None:
        covfunc = gprh.covfunc(type='se')
    self.mynet = nn.Sequential(
        nn.Linear(1, 20),
        nn.Tanh(),
        nn.Linear(20, 8),
        nn.Tanh(),
        nn.Linear(8, 2),
        nn.Tanh(),
        nn.Linear(2, 1),
    )
    # The GP expects one lengthscale per latent dimension, hence the list.
    self.gp = gprh.GP_new(sigma_f=sigma_f, lengthscale=[lengthscale],
                          sigma_n=sigma_n, covfunc=covfunc)
    self.npar = numel(self)   # total number of trainable parameters
    self.pureGP = False
def __init__(self, sigma_f=1, lengthscale=None, sigma_n=1, covfunc=None):
    """Build a small 2-D-input net with no sigmoid, feeding a GP.

    Args:
        sigma_f: GP signal-variance hyperparameter.
        lengthscale: list of GP lengthscales; defaults to ``[1]``.
        sigma_n: GP noise hyperparameter.
        covfunc: covariance function; defaults to a fresh squared-exponential.
    """
    super(gpnet2_1_6, self).__init__()
    # Sentinel defaults avoid sharing one list / covfunc object across
    # all default-constructed instances.
    if lengthscale is None:
        lengthscale = [1]
    if covfunc is None:
        covfunc = gprh.covfunc(type='se')
    self.mynet = nn.Sequential(
        nn.Linear(2, 10), nn.Tanh(),
        nn.Linear(10, 10), nn.Tanh(),
        nn.Linear(10, 10), nn.Tanh(),
        nn.Linear(10, 6), nn.Tanh(),
        nn.Linear(6, 1),
    )
    self.gp = gprh.GP_new(sigma_f=sigma_f, lengthscale=lengthscale,
                          sigma_n=sigma_n, covfunc=covfunc)
    self.npar = numel(self)   # total number of trainable parameters
    self.pureGP = False
def __init__(self, sigma_f=1, lengthscale=None, sigma_n=1, covfunc=None):
    """Build a tiny 2-D-input net (fewer neurons than previous), feeding a GP.

    Args:
        sigma_f: GP signal-variance hyperparameter.
        lengthscale: list of GP lengthscales; defaults to ``[1, 1]``.
        sigma_n: GP noise hyperparameter.
        covfunc: covariance function; defaults to a fresh squared-exponential.
    """
    super(gpnet2_1_8, self).__init__()
    # Sentinel defaults avoid sharing one list / covfunc object across
    # all default-constructed instances.
    if lengthscale is None:
        lengthscale = [1, 1]
    if covfunc is None:
        covfunc = gprh.covfunc(type='se')
    self.mynet = nn.Sequential(
        nn.Linear(2, 6),
        nn.Tanh(),
        # NOTE(review): second consecutive Tanh kept to preserve behavior —
        # it looks like a leftover from commenting out middle layers; confirm
        # whether a single Tanh was intended.
        nn.Tanh(),
        nn.Linear(6, 1),
        nn.Sigmoid(),
    )
    self.gp = gprh.GP_new(sigma_f=sigma_f, lengthscale=lengthscale,
                          sigma_n=sigma_n, covfunc=covfunc)
    # Original set these twice; once is enough.
    self.npar = numel(self)   # total number of trainable parameters
    self.pureGP = False
def __init__(self, sigma_f=1, lengthscale=None, sigma_n=1, covfunc=None):
    """Build a pure GP model (no feature-extractor network).

    Args:
        sigma_f: GP signal-variance hyperparameter.
        lengthscale: list of GP lengthscales; defaults to ``[1, 1]``.
        sigma_n: GP noise hyperparameter.
        covfunc: covariance function; defaults to a fresh squared-exponential.
    """
    super(gpnet2_2_1, self).__init__()
    # Sentinel defaults avoid sharing one list / covfunc object across
    # all default-constructed instances.
    if lengthscale is None:
        lengthscale = [1, 1]
    if covfunc is None:
        covfunc = gprh.covfunc(type='se')
    self.gp = gprh.GP_new(sigma_f=sigma_f, lengthscale=lengthscale,
                          sigma_n=sigma_n, covfunc=covfunc)
    self.npar = numel(self)   # total number of trainable parameters
    self.pureGP = True        # no net in front of the GP
def __init__(self, sigma_f=1, lengthscale=None, sigma_n=1, covfunc=None):
    """Build a 2-D-input net whose layer widths halve each step, feeding a GP.

    Args:
        sigma_f: GP signal-variance hyperparameter.
        lengthscale: list of GP lengthscales; defaults to ``[1]``.
        sigma_n: GP noise hyperparameter.
        covfunc: covariance function; defaults to a fresh squared-exponential.
    """
    super(gpnet2_1_7, self).__init__()
    # Sentinel defaults avoid sharing one list / covfunc object across
    # all default-constructed instances.
    if lengthscale is None:
        lengthscale = [1]
    if covfunc is None:
        covfunc = gprh.covfunc(type='se')
    # Exponential decay of neurons: 300 -> 150 -> 75 -> 30 -> 1.
    self.mynet = nn.Sequential(
        nn.Linear(2, 300), nn.Tanh(),
        nn.Linear(300, 150), nn.Tanh(),
        nn.Linear(150, 75), nn.Tanh(),
        nn.Linear(75, 30), nn.Tanh(),
        nn.Linear(30, 1),
    )
    self.gp = gprh.GP_new(sigma_f=sigma_f, lengthscale=lengthscale,
                          sigma_n=sigma_n, covfunc=covfunc)
    self.npar = numel(self)   # total number of trainable parameters
    self.pureGP = False
# dom_points dom_points = test_x ## STEP 1 # set appr params m = [ 90 ] # nr of basis functions in each latent direction: Change this to add latent outputs diml = len(m) # nr of latent outputs # select model model = gpnets.gpnet1_1_1(sigma_f=1, lengthscale=[1], sigma_n=1, covfunc=gprh.covfunc('matern', nu=2.5)) tun = 30 # scaling parameter for L print('Number of parameters: %d' % model.npar) # loss function lossfu = gprh.NegLOOCrossValidation_phi_noBackward(model.gp.covfunc) # lossfu = gprh.NegMarginalLogLikelihood_phi_noBackward(model.gp.covfunc) # optimiser optimiser = FullBatchLBFGS(model.parameters(), lr=1, history_size=10) # STEP 2/3 mybestnet = gpnets.gpnet1_2_2(sigma_f=1, lengthscale=[1, 1], sigma_n=1) optimiser2 = FullBatchLBFGS(mybestnet.parameters(), lr=1, history_size=10)
# domain random points (used to limit L from below) ndx = 100 ndy = 100 nd = ndx*ndy Xd = np.linspace(-1, 1, ndx) Yd = np.linspace(-1, 1, ndy) Xd,Yd = np.meshgrid(Xd, Yd) dom_points = torch.from_numpy(np.concatenate((np.reshape(Xd,(nd,1)),np.reshape(Yd,(nd,1))),axis=1)).float() ######################## details ######################### ######### step 1 # set appr params m = [90,90] # nr of basis functions in each latent direction: Change this to add latent outputs covfunc1 = gprh.covfunc('matern',nu=2.5) tun1 = 30 model_basic = gpnets.gpnet2_2_1(sigma_f=1, lengthscale=[1], sigma_n=1, covfunc=covfunc1) # pure GP training_iterations_basic = 1 # loss function lossfu_basic = gprh.NegLOOCrossValidation_phi_noBackward(model_basic.gp.covfunc) # optimiser optimiser_basic = FullBatchLBFGS(model_basic.parameters(), lr=1, history_size=10) ######### step 2/3 covfunc2 = gprh.covfunc('matern',nu=2.5) tun = 30 # scaling parameter for L
# dom_points dom_points = test_x # STEP 1 # set appr params m2 = [150] tun2 = 30 diml = len(m2) # nr of latent outputs training_iterations = 200 regweight = 0.0001 mybestnet = gpnets.gpnet1_1_4(sigma_f=1,lengthscale=[1],sigma_n=1,covfunc=gprh.covfunc(type='matern',nu=2.5)) optimiser2 = FullBatchLBFGS(mybestnet.parameters(), lr=1, history_size=10) # STEP 3 int_method = 2 # 1) trapezoidal, 2) simpsons standard, 3) simpsons 3/8 ni = 200 # nr of intervals in numerical integration training_iterations2 = 80 regweight2 = 0.1 # optimiser optimiser3 = FullBatchLBFGS(mybestnet.parameters(), lr=1, history_size=10) lossfu3 = gprh.NegLOOCrossValidation_phi_noBackward(mybestnet.gp.covfunc) # lossfu3 = gprh.NegMarginalLogLikelihood_phi_noBackward(mybestnet.gp.covfunc)