# Example 1
    def _draw_gp_function(self, X, lengthscale=10.0, kernel_str="RBF"):
        if kernel_str == "RBF":
            kernel = RBFKernel()
        elif kernel_str == "Mat":
            kernel = MaternKernel(nu=0.5)
        else:
            raise Exception("Invalid kernel string: {}".format(kernel_str))

        kernel.lengthscale = lengthscale
        with torch.no_grad():
            lazy_cov = kernel(X)
            mean = torch.zeros(lazy_cov.size(0))
            mvn = MultivariateNormal(mean, lazy_cov)
            Y = mvn.rsample()[:, None]
        return Y
# Example 2
def create_bayesian_quadrature_iso_gauss():
    """Build a Gauss-prior Bayesian-quadrature RBF kernel plus fixed test data.

    Returns the quadrature kernel, two fixed input tensors ``x1`` and ``x2``,
    their point counts ``M1`` and ``M2``, and the input dimension ``D``.
    """
    x1 = torch.from_numpy(np.array([[-1, 1], [0, 0], [-2, 0.1]]))
    x2 = torch.from_numpy(np.array([[-1, 1], [0, 0], [-2, 0.1], [-3, 3]]))
    M1, D = x1.size()
    M2 = x2.size()[0]

    prior_mean = torch.from_numpy(np.arange(D))[None, :]
    prior_variance = 2.

    base = RBFKernel()
    base.lengthscale = 1.
    scaled = ScaleKernel(base)
    scaled.outputscale = 1.

    bqkernel = QuadratureRBFGaussPrior(scaled, prior_mean, prior_variance)
    return bqkernel, x1, x2, M1, M2, D
# Example 3
def create_rbf(sigma2, lengthscale):
    """Return a ScaleKernel-wrapped RBF kernel with the given
    outputscale (sigma2) and lengthscale."""
    base_kernel = RBFKernel()
    base_kernel.lengthscale = lengthscale
    scaled_kernel = ScaleKernel(base_kernel)
    scaled_kernel.outputscale = sigma2
    return scaled_kernel
def tracking_expectation(ratio, dim, n_sample=2000, depth=11):
    r"""Compare empirical vs. analytic expectation of Z_l across DGP layers.

    Forward-samples a deep GP at two fixed inputs (0 and 1), records the
    squared difference of the two outputs at every layer, and plots those
    empirical values against the expectation obtained via the recurrence
    ``recur``. The figure is saved under ``../figure/track_expectation/``.

    :param ratio: \sigma^2 / lengthscale^2
    :param dim: width (number of GP units) per hidden layer — TODO confirm
    :param n_sample: number of Monte Carlo forward passes
    :param depth: number of layers in the deep GP
    :return: None (saves a figure as a side effect)
    """
    sigma2 = 1.  # default
    lengthscale2 = sigma2 / ratio

    kernel = RBFKernel()
    kernel.lengthscale = np.sqrt(lengthscale2)

    # x contains two data points
    x = torch.from_numpy(np.array([0., 1.]).astype('float32')).view(2, 1)
    # build model
    dgp = DeepGP(kernel, depth=depth, dim=dim)

    ## compute true expectation via recurrence
    # k12 is the kernel value between the two input points; the first-layer
    # expectation is E[(f(x1) - f(x2))^2] = 2 * (k11 - k12) with k11 = 1 here.
    Kx = kernel(x).evaluate_kernel()
    k12 = Kx.detach().numpy()[0, 1]
    EZ_1 = 2 * (1 - k12)
    true_EZ = [EZ_1]
    temp = EZ_1
    for d in range(1, depth - 1):
        # propagate the expectation one layer deeper via the recurrence
        next_EZ = recur(temp, sigma2, lengthscale2, dim=dim)
        true_EZ.append(next_EZ)
        temp = next_EZ

    # sample and collect
    # NOTE(review): dgp(x) appears to record per-layer samples into
    # dgp.collector as a side effect — verify against DeepGP's implementation.
    for i in range(n_sample):
        dgp(x)
    collector = dgp.collector

    aggs_depth = []
    aggs_diff = []
    aggs_empirical = []
    # post-process sample
    for d in range(depth - 1):
        samples = collector[d]
        samples = np.stack(samples)  # n_sample x 2 x dim
        diff = np.diff(samples, axis=1)  # n_sample x dim
        diff = diff ** 2
        diff = diff.squeeze()
        # take 1 dimension
        selected_dim = 0
        diff = diff[:, selected_dim]
        # layers are reported 1-based in the plot
        aggs_depth.extend([d + 1] * n_sample)
        aggs_diff.extend(diff.tolist())
        aggs_empirical.extend(["empirical"] * n_sample)

    ## true expectation
    aggs_depth.extend(list(range(1, depth)))
    aggs_diff.extend(true_EZ)
    aggs_empirical.extend(["ours"] * len(true_EZ))

    ##
    ## THIS RESULT FROM DUNLOP. BUT CANNOT PLOT SINCE EZ IS TOO BIG
    # aggs_depth.extend(list(range(1, depth)))
    # aggs_diff.extend(dunlop_EZ)
    # aggs_empirical.extend(["Dunlop 2018"]*len(dunlop_EZ))

    # assemble long-format frame for seaborn and plot both series
    d = {"Depth": aggs_depth, "Z": aggs_diff, "empirical": aggs_empirical}
    df = pd.DataFrame(data=d)
    ax = sns.pointplot(x='Depth', y='Z', hue='empirical', data=df, capsize=0.2, markers=["o", "*", "d"], join=False)
    ax.set_ylabel(r'Expectation of $Z_l$', fontdict=font)
    ax.set_xlabel(r'Layer $l$', fontdict=font)
    leg = ax.legend()
    ax.set_title(r"${{\sigma^2}}/{{\ell^2}}={}$,   $m={}$".format(ratio, dim), fontdict=font)
    name = "r_{}_m_{}".format(ratio, dim)
    plt.savefig("../figure/track_expectation/" + name + ".png", bbox_extra_artists=(leg,), bbox_inches='tight', dpi=300)
# Example 5
    def __init__(
        self,
        num_outputs,
        initial_lengthscale,
        initial_inducing_points,
        separate_inducing_points=False,
        kernel="RBF",
        ard=None,
        lengthscale_prior=False,
    ):
        """Construct a variational GP with the given kernel and inducing points.

        :param num_outputs: number of output GPs; >1 enables a batched,
            independent-multitask setup.
        :param initial_lengthscale: initial kernel lengthscale (broadcast to
            the kernel's lengthscale shape).
        :param initial_inducing_points: tensor of inducing inputs; its first
            dimension fixes the number of inducing points.
        :param separate_inducing_points: if True, replicate the inducing
            points so each output GP gets its own independent set.
        :param kernel: kernel family name — one of "RBF", "Matern12",
            "Matern32", "Matern52", "RQ".
        :param ard: number of ARD dimensions passed to the kernel, or None.
        :param lengthscale_prior: if True, place a SmoothedBoxPrior on the
            lengthscale over [e^-1, e^1].
        :raises ValueError: if ``kernel`` names an unknown kernel family.
        """
        n_inducing_points = initial_inducing_points.shape[0]
        if separate_inducing_points:
            # Use independent inducing points per output GP
            initial_inducing_points = initial_inducing_points.repeat(num_outputs, 1, 1)

        # Batched modules for multi-output; empty batch shape otherwise.
        if num_outputs > 1:
            batch_shape = torch.Size([num_outputs])
        else:
            batch_shape = torch.Size([])

        variational_distribution = CholeskyVariationalDistribution(
            n_inducing_points, batch_shape=batch_shape
        )

        # NOTE(review): the strategy is built before super().__init__ is
        # called, passing `self` — presumably required by the base variational
        # GP class's constructor contract; confirm against its docs.
        variational_strategy = VariationalStrategy(
            self, initial_inducing_points, variational_distribution
        )

        if num_outputs > 1:
            # Wrap so each output is modeled as an independent task.
            variational_strategy = IndependentMultitaskVariationalStrategy(
                variational_strategy, num_tasks=num_outputs
            )

        super().__init__(variational_strategy)

        if lengthscale_prior:
            # Smoothed box prior keeps the lengthscale near [e^-1, e^1].
            lengthscale_prior = SmoothedBoxPrior(math.exp(-1), math.exp(1), sigma=0.1)
        else:
            lengthscale_prior = None

        kwargs = {
            "ard_num_dims": ard,
            "batch_shape": batch_shape,
            "lengthscale_prior": lengthscale_prior,
        }

        if kernel == "RBF":
            kernel = RBFKernel(**kwargs)
        elif kernel == "Matern12":
            kernel = MaternKernel(nu=1 / 2, **kwargs)
        elif kernel == "Matern32":
            kernel = MaternKernel(nu=3 / 2, **kwargs)
        elif kernel == "Matern52":
            kernel = MaternKernel(nu=5 / 2, **kwargs)
        elif kernel == "RQ":
            kernel = RQKernel(**kwargs)
        else:
            raise ValueError("Specified kernel not known.")

        # Broadcast the scalar initial lengthscale to the kernel's full
        # lengthscale shape (covers ARD / batched kernels).
        kernel.lengthscale = initial_lengthscale * torch.ones_like(kernel.lengthscale)

        self.mean_module = ConstantMean(batch_shape=batch_shape)
        self.covar_module = ScaleKernel(kernel, batch_shape=batch_shape)