def test_setters(self):
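        """Exercise the noise and task-noise setters for rank-0 and low-rank likelihoods."""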
        likelihood = MultitaskGaussianLikelihood(num_tasks=3, rank=0)

        a = torch.randn(3, 2)
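        # a @ a.T is a random symmetric PSD 3x3 matrix to use as a task covariance.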
        mat = a.matmul(a.transpose(-1, -2))

        # test rank 0 setters
        likelihood.noise = 0.5
        self.assertAlmostEqual(0.5, likelihood.noise.item())

        likelihood.task_noises = torch.tensor([0.04, 0.04, 0.04])
        for i in range(3):
            self.assertAlmostEqual(0.04, likelihood.task_noises[i].item())

        with self.assertRaises(AttributeError) as context:
            likelihood.task_noise_covar = mat
        self.assertTrue("task noises" in str(context.exception))

        # test low rank setters
        likelihood = MultitaskGaussianLikelihood(num_tasks=3, rank=2)
        likelihood.noise = 0.5
        self.assertAlmostEqual(0.5, likelihood.noise.item())

        likelihood.task_noise_covar = mat
        self.assertAllClose(mat, likelihood.task_noise_covar)

        with self.assertRaises(AttributeError) as context:
            likelihood.task_noises = torch.tensor([0.04, 0.04, 0.04])
        self.assertTrue("task noises" in str(context.exception))
Example 2
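
# Presumed imports for this example; safe_cast, priors, LandauCahnHilliard,
# Evaluator, loss, ExactGPModel, optimise, acq and the PARAM_* constants are
# project-local helpers assumed to be in scope here.
import torch
from pyDOE import lhs
from gpytorch.likelihoods import MultitaskGaussianLikelihood
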
def run(obs, params_true, device='cpu'):
    device = safe_cast(torch.device, device)

    dx, NX = PARAM_DX, PARAM_MESH_RES_SPACE

    ts = torch.arange(PARAM_MESH_RES_TIME, device=device)

    priors_uniform = priors()

    y = torch.tensor(obs['Ss'], device=device)

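    # simulate(params) builds a differentiable loss: the Cahn-Hilliard solver
    # is wrapped in an Evaluator so autograd can provide gradients of the loss
    # with respect to the parameters (a, b, k).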
    def simulate(params):
        _theta = {'a': params[0], 'b': params[1], 'k': params[2]}
        sim_pde = LandauCahnHilliard(params=_theta,
                                     M=PARAM_DT,
                                     dx=dx,
                                     device=device)
        loss_fn = Evaluator(sim_pde, loss)

        return loss_fn

    # Draw the initial design points from a Latin hypercube on the unit cube.
    pgd = lhs(3, samples=PARAM_INIT_EVAL)
    xs, ys = [], []

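    # Rescale each unit-cube LHS sample into the (a, b, k) prior box.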
    for j in range(PARAM_INIT_EVAL):
        xk = torch.stack(
            [(priors_uniform[k][1] - priors_uniform[k][0]) *
             torch.tensor(pgd[j, i], device=device, dtype=torch.float32) +
             priors_uniform[k][0] for i, k in enumerate(('a', 'b', 'k'))], 0)
        xs.append(xk)
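    # Small random initial concentration field, shaped (batch, channel, NX, NX).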
    phi0 = (0.2 * torch.rand((NX, NX), device=device)).view(-1, 1, NX, NX)

    for j in range(PARAM_INIT_EVAL):
        params = xs[j]
        loss_fn = simulate(params)
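        # A single forward/backward pass yields the loss and its gradients
        # with respect to (a, b, k).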
        ell = loss_fn(phi0, ts, y, dx)
        ell.backward()
        grads = (loss_fn._pde._a.grad, loss_fn._pde._b.grad,
                 loss_fn._pde._k.grad)
        ys.append(torch.stack([ell.detach(), *grads]).unsqueeze(0))
        print('init sample %d/%d' % (j + 1, PARAM_INIT_EVAL))

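    # Each target row is (loss, dL/da, dL/db, dL/dk): four tasks for the GP.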
    x_init, y_init = torch.stack(xs), torch.cat(ys, 0)

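    # Dense candidate grid over the prior box, one linspace axis per parameter.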
    N = PARAM_SEARCH_RES
    x_eval = torch.cat([
        x.reshape(-1, 1)
        for x in torch.meshgrid(*[
            torch.linspace(priors_uniform[k][0], priors_uniform[k][1], N)
            for k in priors_uniform
        ])
    ], 1)

    x_train = x_init
    y_train = y_init
    print('initial design %s, candidate grid %s' %
          (tuple(x_train.shape), tuple(x_eval.shape)))

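    # Fix the observation noise at a small constant jitter for numerical stability.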
    jit = 1e-2

    lik = MultitaskGaussianLikelihood(num_tasks=4)
    lik.noise_covar.noise = jit * torch.ones(4)
    lik.noise = torch.tensor(jit).sqrt()

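    # Bayesian-optimisation loop: refit the multitask GP, maximise the
    # acquisition over the remaining candidate grid, evaluate the PDE loss and
    # its gradients at the chosen point, and grow the training set.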
    for i in range(PARAM_MAX_EVAL - PARAM_INIT_EVAL):
        for ntry in range(5):
            model = ExactGPModel(x_train, y_train, lik)
            try:
                optimise(model, method='adam', max_iter=1000)
                break
            except Exception as err:
                print('attempt %d failed: %s' % (ntry + 1, err))
                if ntry == 4:
                    raise

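        # Pick the next point by maximising the acquisition utility against
        # the best loss observed so far.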
        u = acq(y_train[:, 0].min(), model, x_eval)
        idx = u.argmax()
        xn = x_eval[idx, :]
        loss_fn = simulate(xn)

        ell = loss_fn(phi0, ts, y, dx)
        ell.backward()
        grads = (loss_fn._pde._a.grad, loss_fn._pde._b.grad,
                 loss_fn._pde._k.grad)

        yn = torch.stack([ell.detach(), *grads], -1).unsqueeze(0)

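        # Remove the evaluated point from the candidate grid and append it,
        # with its observations, to the training data.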
        x_eval = torch.cat([x_eval[0:idx, :], x_eval[idx + 1:, :]], 0)
        x_train = torch.cat([x_train, xn.reshape(1, -1)])

        y_train = torch.cat([y_train, yn], 0)
        print('BO iteration %d/%d: best loss %.4g' %
              (i + 1, PARAM_MAX_EVAL - PARAM_INIT_EVAL,
               y_train[:, 0].min().item()))
    return (x_train, y_train)