Example #1
#imports assumed by the examples below; SingletaskGPModel and
#SingletaskGPModel_mIn are user-defined ExactGP models (see the sketch
#after Example #3)
import math

import numpy as np
import torch
import gpytorch
from gpytorch.likelihoods import GaussianLikelihood, _GaussianLikelihoodBase
from gpytorch.likelihoods.noise_models import HeteroskedasticNoise


def gprTorch_singleTask():
    """
    Single-task GPR with a heteroscedastic noise level
    """
    #synthetic data: a noisy sine whose noise sdev grows linearly from 0.05 to 0.55
    train_x = torch.linspace(0, 1, 75)
    sem_y1 = 0.05 + (0.55 - 0.05) * torch.linspace(0, 1, 75)
    train_y = torch.sin(train_x *
                        (2 * math.pi)) + sem_y1 * torch.randn(train_x.size())
    train_y_log_var = (sem_y1**2.).log()
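    #the noise GP is fit to the log of the noise variance rather than the
    #variance itself, so its unconstrained predictions map to positive noise levels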

    #GPR for the log-variance of the observation noise
    log_noise_model = SingletaskGPModel(
        train_x,
        train_y_log_var,
        GaussianLikelihood(),
    )

    #heteroscedastic likelihood: the observation-noise level at each input is
    #inferred from the predictions of log_noise_model
    likelihood = _GaussianLikelihoodBase(
        noise_covar=HeteroskedasticNoise(log_noise_model))
    #define the model
    model = SingletaskGPModel(train_x, train_y, likelihood)
    #training the model
    model.train()
    likelihood.train()
    #optimize the model hyperparameters
    optimizer = torch.optim.Adam(
        [  #Adam optimizer: https://arxiv.org/abs/1412.6980
            {
                'params': model.parameters()
            },
        ],
        lr=0.1)

    #"Loss" for GPs - mll: marginal log likelihood
    mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)
    nIter = 75
    for i in range(nIter):
        optimizer.zero_grad()
        output = model(train_x)
        loss = -mll(output, train_y, train_x)  #train_x is forwarded to the heteroscedastic likelihood
        loss.backward()
        optimizer.step()
        if (i + 1) % 10 == 0:
            print(
                '...... GPR-hyperParam Optimization, iter %d/%d - loss: %.3f' %
                (i + 1, nIter, loss.item()))

    # GPR model with optimized hyperparameters
    model.eval()
    likelihood.eval()
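Example #1 stops after switching to evaluation mode. Following the prediction pattern of Examples #2 and #3 below, a minimal continuation could look like this sketch (test_x is a hypothetical set of test points):

    #(sketch) posterior prediction at hypothetical test points test_x
    test_x = torch.linspace(0, 1, 100)
    with torch.no_grad():
        post_f = model(test_x)  #posterior of the latent function f
        post_obs = likelihood(post_f, test_x)  #posterior predictive of y
        mean = post_obs.mean
        lower, upper = post_obs.confidence_region()  #mean -/+ 2 sdev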
Example #2
    def gprTorch_pd_singleTask(self):
        """
        GPR for p>1 uncertain parameters and a single-variate response y
        """
        xTrain = self.xTrain
        yTrain = self.yTrain[:, 0]
        noiseSdev = self.noiseV
        xTest = self.xTest
        gprOpts = self.gprOpts
        p = self.p
        #(0) Assignments
        nIter = gprOpts[
            'nIter']  #number of iterations in the optimization of the hyperparameters
        lr_ = gprOpts[
            'lr']  #learning rate for the optimizer of the hyperparameters
        torch.set_printoptions(
            precision=8
        )  #to avoid losing accuracy when printing after converting to torch
        #(1) convert numpy arrays to torch tensors
        xTrain = torch.from_numpy(xTrain)
        yTrain = torch.from_numpy(yTrain)
        yLogVar = torch.from_numpy(np.log(noiseSdev**2.))

        #(2) Construct the GPR for the noise
        log_noise_model = SingletaskGPModel_mIn(
            xTrain,
            yLogVar,
            GaussianLikelihood(),
        )
        #(3) Construct GPR for f(q)
        #  (a) Likelihood
        likelihood = _GaussianLikelihoodBase(
            noise_covar=HeteroskedasticNoise(log_noise_model))
        # likelihood = GaussianLikelihood(noise=noiseSdev**2.)
        ##common Gaussian likelihood with no inference for heteroscedastic noise levels

        #  (b) prior GPR model
        model = SingletaskGPModel_mIn(xTrain, yTrain, likelihood)

        #(4) Optimize the hyperparameters
        model.train()
        likelihood.train()
        optimizer = torch.optim.Adam(
            [
                {
                    'params': model.parameters()
                },  # Includes Likelihood parameters
                #                    {'params': list(model.parameters()) + list(likelihood.parameters())},
            ],
            lr=lr_)  #lr: learning rate
        # "Loss" for GPs - the marginal log likelihood
        mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)
        losses = []
        lengthSc = [[] for _ in range(p)]
        for i in range(nIter):
            # Zero gradients from previous iteration
            optimizer.zero_grad()
            # Output from model
            output = model(xTrain)
            # Calc loss and backprop gradients
            loss = -mll(output, yTrain, xTrain)
            loss.backward()
            # optimize
            optimizer.step()
            # info on optimization
            loss_ = loss.item()
            lengthSc_ = []
            for j in range(p):
                lengthSc_.append(
                    model.covar_module.base_kernel.lengthscale.squeeze()
                    [j].item())
                #lengthSc_.append(model.covar_module.base_kernel.lengthscale.item())
                ##if all lengthscales are the same (see the definition of self.covar_module, above)
            if (i + 1) % 100 == 0:
                print(
                    '...... GPR-hyperparameters Optimization, iter %d/%d - loss: %.3f'
                    % ((i + 1), nIter, loss_),
                    end="  ")
                print('lengthscales=' + '%.3f ' * p % (tuple(lengthSc_)))
            losses.append(loss_)
            for j in range(p):
                lengthSc[j].append(lengthSc_[j])
        self.loss = losses
        self.lengthSc = lengthSc
        #print('lr=',optimizer.param_groups[0]['lr'])
        #print('pars',optimizer.param_groups[0]['params'])
        # Plot convergence of hyperparameters optimization
        if gprOpts['convPlot']:
            self.optim_conv_plot()
        #(5) Posteriors of the GPR model with optimized hyperparameters
        model.eval()
        likelihood.eval()
        #(6) Prediction at the test inputs
        with torch.no_grad():
            xTest = torch.from_numpy(xTest)
            post_f = model(xTest)
            post_obs = likelihood(post_f, xTest)
            self.post_f = post_f
            self.post_y = post_obs
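The stored posteriors post_f and post_y are GPyTorch MultivariateNormal objects. Downstream code might read the predictive mean and a two-standard-deviation band from them as in this minimal sketch (gpr stands for a hypothetical instance of the surrounding class):

    #(sketch) reading the posteriors stored by gprTorch_pd_singleTask()
    with torch.no_grad():
        yMean = gpr.post_y.mean.numpy()  #predictive mean of y
        yLo, yHi = gpr.post_y.confidence_region()  #mean -/+ 2 sdev
        yLo, yHi = yLo.numpy(), yHi.numpy()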
Example #3
    def gprTorch_1d_singleTask(self):
        """
        GPR for a 1D uncertain parameter and a single-variate response y.
        """
        xTrain = self.xTrain
        yTrain = self.yTrain[:, 0]
        noiseSdev = self.noiseV
        xTest = self.xTest
        gprOpts = self.gprOpts
        #(0) Assignments
        nIter = gprOpts[
            'nIter']  #number of iterations in the optimization of the hyperparameters
        lr_ = gprOpts[
            'lr']  #learning rate for the optimizer of the hyperparameters
        torch.set_printoptions(
            precision=8
        )  #to avoid losing accuracy when printing after converting to torch
        #(1) convert numpy arrays to torch tensors
        xTrain = torch.from_numpy(xTrain)
        yTrain = torch.from_numpy(yTrain)
        yLogVar = torch.from_numpy(np.log(noiseSdev**2.))

        #(2) Construct GPR for noise
        log_noise_model = SingletaskGPModel(
            xTrain,
            yLogVar,
            GaussianLikelihood(),
        )
        #(3) Construct GPR for f(q)
        #  (a) Likelihood
        likelihood = _GaussianLikelihoodBase(
            noise_covar=HeteroskedasticNoise(log_noise_model))
        #  (b) prior GPR model
        model = SingletaskGPModel(xTrain, yTrain, likelihood)
        #(4) Train the model
        model.train()
        likelihood.train()
        #(5) Optimize the model hyperparameters
        optimizer = torch.optim.Adam(
            [  #Adam optimizer: https://arxiv.org/abs/1412.6980
                {
                    'params': model.parameters()
                },  # Includes GaussianLikelihood parameters
            ],
            lr=lr_)
        #   "Loss" for GPs - mll: marginal log likelihood
        mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model)
        losses = []
        lengthSc = []
        for i in range(nIter):
            optimizer.zero_grad()
            output = model(xTrain)
            loss = -mll(output, yTrain, xTrain)
            loss.backward()
            optimizer.step()
            losses.append(loss.item())
            lengthSc.append(model.covar_module.base_kernel.lengthscale.item())
            if (i + 1) % 100 == 0:
                print(
                    '...... GPR-hyperparameters Optimization, iter %d/%d - loss: %.3f - lengthsc: %.3f'
                    % (i + 1, nIter, losses[-1], lengthSc[-1]))
        self.loss = losses
        self.lengthSc = lengthSc
        # Plot convergence of hyperparameters optimization
        if gprOpts['convPlot']:
            self.optim_conv_plot()

        #(6) Posteriors of GPR model with optimized hyperparameters
        model.eval()
        likelihood.eval()
        #(7) Evaluate the posteriors at the test points
        with torch.no_grad():
            xTest = torch.from_numpy(xTest)
            post_f = model(xTest)
            post_obs = likelihood(post_f, xTest)
        self.post_f = post_f
        self.post_y = post_obs
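All three examples rely on SingletaskGPModel and SingletaskGPModel_mIn, which are defined elsewhere in the source. A minimal definition consistent with the attribute access model.covar_module.base_kernel.lengthscale used above could look like the following sketch; the constant mean and the RBF base kernel are assumptions, and only the ScaleKernel wrapper around a lengthscale-bearing kernel is implied by the code:

class SingletaskGPModel(gpytorch.models.ExactGP):
    #assumed definition; the actual class is defined elsewhere in the source
    def __init__(self, train_x, train_y, likelihood):
        super().__init__(train_x, train_y, likelihood)
        self.mean_module = gpytorch.means.ConstantMean()
        self.covar_module = gpytorch.kernels.ScaleKernel(
            gpytorch.kernels.RBFKernel())

    def forward(self, x):
        mean_x = self.mean_module(x)
        covar_x = self.covar_module(x)
        return gpytorch.distributions.MultivariateNormal(mean_x, covar_x)

For the multi-input variant SingletaskGPModel_mIn, passing RBFKernel(ard_num_dims=p) would give one lengthscale per input dimension, matching the lengthscale.squeeze()[j] access in Example #2.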