Code Example #1
File: problem.py  Project: mshvartsman/aepsych-1
    def evaluate(self, strat):
        metrics = super().evaluate(strat)

        thresh = self.options.get("thresh", 0.75)
        gridsize = self.options.get("gridsize", 10)
        post_mean, _ = strat.predict(self.eval_grid)

        dim = self.eval_grid.shape[1]
        post_mean_reshape = post_mean.reshape((gridsize,) * dim)
        phi_post_mean = norm.cdf(post_mean_reshape.detach().numpy())
        # assume mono_dim is last dim (TODO make this better)

        x1 = _dim_grid(
            lower=strat.lb.numpy()[-1],
            upper=strat.ub.numpy()[-1],
            dim=1,
            gridsize=gridsize,
        ).squeeze()
        x2_hat = get_lse_contour(phi_post_mean, x1, level=thresh, lb=-1.0, ub=1.0)

        true_f = self.f(self.eval_grid)

        true_f_reshape = true_f.reshape((gridsize,) * dim)

        true_x2 = get_lse_contour(
            norm.cdf(true_f_reshape), x1, level=thresh, lb=-1.0, ub=1.0
        )
        assert x2_hat.shape == true_x2.shape, (
            "x2_hat.shape != true_x2.shape, something went wrong! "
            + f"x2_hat.shape={x2_hat.shape}, true_x2.shape={true_x2.shape}"
        )
        mae = np.mean(np.abs(true_x2 - x2_hat))
        mse = np.mean((true_x2 - x2_hat) ** 2)
        max_abs_err = np.max(np.abs(true_x2 - x2_hat))

        metrics["mean_abs_err_thresh"] = mae
        metrics["mean_square_err_thresh"] = mse
        metrics["max_abs_err_thresh"] = max_abs_err

        if dim != 1:
            corr = pearsonr(true_x2.flatten(), x2_hat.flatten())[0]
            metrics["pearson_corr_thresh"] = corr

        # now construct integrated error on thresh
        fsamps = strat.sample(self.eval_grid, num_samples=1000).detach().numpy()

        square_samps = [s.reshape((gridsize,) * strat.modelbridge.dim) for s in fsamps]
        contours = np.stack(
            [
                get_lse_contour(norm.cdf(s), x1, level=thresh, mono_dim=-1, lb=-1, ub=1)
                for s in square_samps
            ]
        )

        thresh_err = contours - true_x2[None, :]

        metrics["mean_integrated_abs_err_thresh"] = np.mean(np.abs(thresh_err))
        metrics["mean_integrated_square_err_thresh"] = np.mean(thresh_err ** 2)

        return metrics
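The contour extraction above leans on get_lse_contour, which finds where a posterior that is monotone in its last dimension crosses the target level. A minimal NumPy sketch of that idea (an illustration under the monotonicity assumption, not aepsych's implementation):

import numpy as np

def lse_contour_sketch(post, x1, level, lb, ub):
    """Interpolate where each slice of `post` (monotone increasing along
    the last axis, on grid `x1`) crosses `level`, clipped to [lb, ub]."""
    flat = post.reshape(-1, post.shape[-1])
    crossings = np.array([np.interp(level, row, x1) for row in flat])
    return np.clip(crossings, lb, ub).reshape(post.shape[:-1])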
Code Example #2
File: base.py  Project: mshvartsman/aepsych-1
    def _get_contour(self, gridsize=30):

        from aepsych.utils import _dim_grid, get_lse_contour

        if self.dim == 2:

            grid_search = _dim_grid(self, gridsize=gridsize)
            post_mean, _ = self.predict(torch.Tensor(grid_search))
            post_mean = norm.cdf(post_mean.reshape(gridsize, gridsize).detach().numpy())
            x1 = _dim_grid(lower=self.lb, upper=self.ub, dim=1, gridsize=gridsize)
            x2_hat = get_lse_contour(
                post_mean, x1, level=self.target, lb=x1.min(), ub=x1.max()
            )
            return x2_hat
        else:
            return None
Code Example #3
File: problem.py  Project: mshvartsman/aepsych-1
    def __init__(self, lb, ub, **options):
        assert len(lb) == len(ub), "bounds should be same size"
        dim = len(lb)
        self.options = options
        gridsize = self.options.get("gridsize", 10)
        self.eval_grid = _dim_grid(
            lower=lb, upper=ub, dim=dim, gridsize=gridsize
        ).squeeze()
Code Example #4
File: base.py  Project: mshvartsman/aepsych-1
    def get_jnd(
        self, grid=None, cred_level=None, intensity_dim=-1, confsamps=500, method="step"
    ):
        """Calculate the JND. Note that JND can have multiple plausible definitions
        outside of the linear case, so we provide options for how to compute it.
        For method="step", we report how far one needs to go over in stimulus
        space to move 1 unit up in latent space (this is a lot of people's
        conventional understanding of the JND).
        For method="taylor", we report the local derivative, which also maps to a
        1st-order Taylor expansion of the latent function. This is a formal
        generalization of JND as defined in Weber's law.
        Both definitions are equivalent for linear psychometric functions.
        """
        if method not in ("taylor", "step"):
            raise RuntimeError(f"Unknown method {method}!")

        if grid is None:
            grid = _dim_grid(self)

        # back out the intensity-dim grid, assuming a square grid;
        # round to guard against floating-point error in the fractional power
        gridsize = int(np.round(np.power(grid.shape[0], 1 / grid.shape[1])))
        coords = np.linspace(self.lb[intensity_dim], self.ub[intensity_dim], gridsize)

        if cred_level is None:
            fmean, _ = self.predict(grid)
            fmean = fmean.detach().numpy().reshape(*[gridsize for i in range(self.dim)])

            if method == "taylor":
                return 1 / np.gradient(fmean, coords, axis=intensity_dim)
            elif method == "step":
                return np.clip(
                    get_jnd_multid(fmean, coords, mono_dim=intensity_dim), 0, np.inf
                )
        else:
            alpha = 1 - cred_level
            qlower = alpha / 2
            qupper = 1 - alpha / 2

            fsamps = self.sample(grid, confsamps)
            if method == "taylor":
                jnds = 1 / np.gradient(
                    fsamps.detach()
                    .numpy()
                    .reshape(confsamps, *[gridsize for i in range(self.dim)]),
                    coords,
                    axis=intensity_dim,
                )
            elif method == "step":
                samps = [
                    s.reshape((gridsize,) * self.dim) for s in fsamps.detach().numpy()
                ]
                jnds = np.stack(
                    [get_jnd_multid(s, coords, mono_dim=intensity_dim) for s in samps]
                )
            upper = np.clip(np.quantile(jnds, qupper, axis=0), 0, np.inf)
            lower = np.clip(np.quantile(jnds, qlower, axis=0), 0, np.inf)
            median = np.clip(np.quantile(jnds, 0.5, axis=0), 0, np.inf)
            return median, lower, upper

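As a concrete check of the docstring's claim that the "step" and "taylor" definitions coincide for linear psychometric functions: for f(x) = a*x + b, the Taylor JND is 1/f'(x) = 1/a, and the distance needed to move one unit up in latent space is also 1/a. A self-contained sketch (plain NumPy, not the aepsych API):

import numpy as np

a, b = 2.0, -1.0
coords = np.linspace(-3, 3, 101)
f = a * coords + b

# "taylor": inverse of the local derivative
jnd_taylor = 1 / np.gradient(f, coords)

# "step": how far to move in x to raise f by one unit,
# via interpolation of the monotone latent function
jnd_step = np.interp(f + 1, f, coords) - coords

interior = slice(10, -10)  # avoid edge clamping in np.interp
assert np.allclose(jnd_taylor, 1 / a)
assert np.allclose(jnd_step[interior], 1 / a)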
Code Example #5
File: test_utils.py  Project: mshvartsman/aepsych-1
    def test_dim_grid_modelbridge_size(self):

        lb = -4.0
        ub = 4.0
        dim = 1
        gridsize = 10
        mb = ModelBridge(lb=lb, ub=ub, dim=dim)
        grid = _dim_grid(mb, gridsize=gridsize)
        self.assertEqual(grid.shape, torch.Size([10, 1]))
Code Example #6
    def test_songetal_funs_smoke(self):
        valid_phenotypes = [
            "Metabolic", "Sensory", "Metabolic+Sensory", "Older-normal"
        ]
        grid = _dim_grid(lower=[-3, -20], upper=[4, 120], dim=2, gridsize=30)
        try:
            for phenotype in valid_phenotypes:
                testfun = make_songetal_testfun(phenotype=phenotype)
                f = testfun(grid)
                self.assertEqual(f.shape, torch.Size([900]))
        except Exception:
            self.fail()

        with self.assertRaises(AssertionError):
            _ = make_songetal_testfun(phenotype="not_a_real_phenotype")
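These two tests pin down _dim_grid's contract: a lattice with gridsize points per dimension, flattened to shape (gridsize ** dim, dim). A minimal meshgrid-based equivalent (a sketch of the contract, not aepsych's implementation):

import torch

def dim_grid_sketch(lower, upper, dim, gridsize=30):
    # one linspace per dimension, crossed into a lattice and
    # flattened to (gridsize ** dim, dim)
    axes = [
        torch.linspace(float(lower[i]), float(upper[i]), gridsize)
        for i in range(dim)
    ]
    mesh = torch.meshgrid(*axes, indexing="ij")
    return torch.stack([m.reshape(-1) for m in mesh], dim=-1)

assert dim_grid_sketch([-4.0], [4.0], dim=1, gridsize=10).shape == torch.Size([10, 1])
assert dim_grid_sketch([-3, -20], [4, 120], dim=2, gridsize=30).shape == torch.Size([900, 2])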
Code Example #7
def plot_prior_samps_1d():
    config = Config(
        config_dict={
            "common": {
                "outcome_type": "single_probit",
                "target": 0.75,
                "lb": "[-3]",
                "ub": "[3]",
            },
            "default_mean_covar_factory": {},
            "song_mean_covar_factory": {},
            "monotonic_mean_covar_factory": {"monotonic_idxs": "[0]"},
        }
    )
    lb = torch.Tensor([-3])
    ub = torch.Tensor([3])
    nsamps = 10
    gridsize = 50
    grid = _dim_grid(lower=lb, upper=ub, dim=1, gridsize=gridsize)
    np.random.seed(global_seed)
    torch.random.manual_seed(global_seed)
    with gpytorch.settings.prior_mode(True):
        rbf_mean, rbf_covar = default_mean_covar_factory(config)
        rbf_model = GPClassificationModel(
            inducing_min=lb,
            inducing_max=ub,
            inducing_size=100,
            mean_module=rbf_mean,
            covar_module=rbf_covar,
        )
        # add just two samples at high and low
        rbf_model.set_train_data(
            torch.Tensor([-3, 3])[:, None], torch.LongTensor([0, 1])
        )
        rbf_samps = rbf_model(grid).sample(torch.Size([nsamps]))

        song_mean, song_covar = song_mean_covar_factory(config)
        song_model = GPClassificationModel(
            inducing_min=lb,
            inducing_max=ub,
            inducing_size=100,
            mean_module=song_mean,
            covar_module=song_covar,
        )
        song_model.set_train_data(
            torch.Tensor([-3, 3])[:, None], torch.LongTensor([0, 1])
        )

        song_samps = song_model(grid).sample(torch.Size([nsamps]))

        mono_mean, mono_covar = monotonic_mean_covar_factory(config)
        mono_model = MonotonicRejectionGP(
            likelihood="probit-bernoulli",
            monotonic_idxs=[0],
            mean_module=mono_mean,
            covar_module=mono_covar,
        )

        bounds_ = torch.tensor([-3.0, 3.0])[:, None]
        # Select inducing points
        mono_model.inducing_points = draw_sobol_samples(
            bounds=bounds_, n=mono_model.num_induc, q=1
        ).squeeze(1)

        inducing_points_aug = mono_model._augment_with_deriv_index(
            mono_model.inducing_points, 0
        )
        scales = ub - lb
        dummy_train_x = mono_model._augment_with_deriv_index(
            torch.Tensor([-3, 3])[:, None], 0
        )
        mono_model.model = MixedDerivativeVariationalGP(
            train_x=dummy_train_x,
            train_y=torch.LongTensor([0, 1]),
            inducing_points=inducing_points_aug,
            scales=scales,
            fixed_prior_mean=torch.Tensor([0.75]),
            covar_module=mono_covar,
            mean_module=mono_mean,
        )
        mono_samps = mono_model.sample(grid, nsamps)

    fig, ax = plt.subplots(1, 3, figsize=(7.5, 3))
    fig.tight_layout(rect=[0.01, 0.03, 1, 0.9])
    fig.suptitle("GP prior samples (probit-transformed)")
    ax[0].plot(grid.squeeze(), norm.cdf(song_samps.T), "b")
    ax[0].set_ylabel("Response Probability")
    ax[0].set_title("Linear kernel")

    ax[1].plot(grid.squeeze(), norm.cdf(rbf_samps.T), "b")
    ax[1].set_xlabel("Intensity")
    ax[1].set_title("RBF kernel (nonmonotonic)")

    ax[2].plot(grid.squeeze(), norm.cdf(mono_samps.T), "b")
    ax[2].set_title("RBF kernel (monotonic)")
    return fig
Code Example #8
def plot_prior_samps_2d():
    config = Config(
        config_dict={
            "common": {
                "outcome_type": "single_probit",
                "target": 0.75,
                "lb": "[-3, -3]",
                "ub": "[3, 3]",
            },
            "default_mean_covar_factory": {},
            "song_mean_covar_factory": {},
            "monotonic_mean_covar_factory": {"monotonic_idxs": "[1]"},
        }
    )
    lb = torch.Tensor([-3, -3])
    ub = torch.Tensor([3, 3])
    nsamps = 5
    gridsize = 30
    grid = _dim_grid(lower=lb, upper=ub, dim=2, gridsize=gridsize)
    np.random.seed(global_seed)
    torch.random.manual_seed(global_seed)
    with gpytorch.settings.prior_mode(True):
        rbf_mean, rbf_covar = default_mean_covar_factory(config)
        rbf_model = GPClassificationModel(
            inducing_min=lb,
            inducing_max=ub,
            inducing_size=100,
            mean_module=rbf_mean,
            covar_module=rbf_covar,
        )
        # add a single low-intensity training point; a 2d input must be (1, 2)
        rbf_model.set_train_data(
            torch.Tensor([-3, -3])[None, :], torch.LongTensor([0])
        )
        rbf_samps = rbf_model(grid).sample(torch.Size([nsamps]))

        song_mean, song_covar = song_mean_covar_factory(config)
        song_model = GPClassificationModel(
            inducing_min=lb,
            inducing_max=ub,
            inducing_size=100,
            mean_module=song_mean,
            covar_module=song_covar,
        )
        song_model.set_train_data(
            torch.Tensor([-3, -3])[None, :], torch.LongTensor([0])
        )

        song_samps = song_model(grid).sample(torch.Size([nsamps]))

        mono_mean, mono_covar = monotonic_mean_covar_factory(config)
        mono_model = MonotonicRejectionGP(
            likelihood="probit-bernoulli",
            monotonic_idxs=[1],
            mean_module=mono_mean,
            covar_module=mono_covar,
            num_induc=1000,
        )

        bounds_ = torch.tensor([-3.0, -3.0, 3.0, 3.0]).reshape(2, -1)
        # Select inducing points
        mono_model.inducing_points = draw_sobol_samples(
            bounds=bounds_, n=mono_model.num_induc, q=1
        ).squeeze(1)

        inducing_points_aug = mono_model._augment_with_deriv_index(
            mono_model.inducing_points, 0
        )
        scales = ub - lb
        dummy_train_x = mono_model._augment_with_deriv_index(
            torch.Tensor([-3, 3])[None, :], 0
        )
        mono_model.model = MixedDerivativeVariationalGP(
            train_x=dummy_train_x,
            train_y=torch.LongTensor([0]),
            inducing_points=inducing_points_aug,
            scales=scales,
            fixed_prior_mean=torch.Tensor([0.75]),
            covar_module=mono_covar,
            mean_module=mono_mean,
        )
        mono_samps = mono_model.sample(grid, nsamps)

    intensity_grid = np.linspace(-3, 3, gridsize)
    fig, ax = plt.subplots(1, 3, figsize=(7.5, 3))
    fig.tight_layout(rect=[0, 0.03, 1, 0.9])
    fig.suptitle("Prior samples")

    square_samps = np.array([s.reshape((gridsize,) * 2).numpy() for s in song_samps])
    plotsamps = norm.cdf(square_samps[:, ::5, :]).T.reshape(gridsize, -1)
    ax[0].plot(intensity_grid, plotsamps, "b")
    ax[0].set_title("Linear kernel model")

    square_samps = np.array([s.reshape((gridsize,) * 2).numpy() for s in rbf_samps])
    plotsamps = norm.cdf(square_samps[:, ::5, :]).T.reshape(gridsize, -1)
    ax[1].plot(intensity_grid, plotsamps, "b")
    ax[1].set_title("Nonmonotonic RBF kernel model")

    square_samps = np.array([s.reshape((gridsize,) * 2).numpy() for s in mono_samps])
    plotsamps = norm.cdf(square_samps[:, ::5, :]).T.reshape(gridsize, -1)
    ax[2].plot(intensity_grid, plotsamps, "b")
    ax[2].set_title("Monotonic RBF kernel model")

    return fig
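Both figure builders return the matplotlib Figure, so saving them is direct (the output filenames here are placeholders):

fig_1d = plot_prior_samps_1d()
fig_1d.savefig("prior_samps_1d.pdf")

fig_2d = plot_prior_samps_2d()
fig_2d.savefig("prior_samps_2d.pdf")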
Code Example #9
def plot_strat_1d(
    strat,
    title,
    ax=None,
    true_testfun=None,
    cred_level=0.95,
    target_level=0.75,
    xlabel="Intensity (abstract)",
    gridsize=30,
):

    x, y = strat.x, strat.y

    if ax is None:
        fig, ax = plt.subplots()

    grid = _dim_grid(modelbridge=strat.modelbridge, gridsize=gridsize)
    samps = norm.cdf(strat.modelbridge.sample(grid, num_samples=10000).detach().numpy())
    phimean = samps.mean(0)
    upper = np.quantile(samps, cred_level, axis=0)
    lower = np.quantile(samps, 1 - cred_level, axis=0)

    ax.plot(np.squeeze(grid), phimean)
    ax.fill_between(
        np.squeeze(grid),
        lower,
        upper,
        alpha=0.3,
        hatch="///",
        edgecolor="gray",
        label=f"{cred_level*100:.0f}% posterior mass",
    )
    if target_level is not None:
        from aepsych.utils import interpolate_monotonic

        threshold_samps = [
            interpolate_monotonic(grid.squeeze().numpy(), s, target_level,
                                  strat.lb[0], strat.ub[0]) for s in samps
        ]
        # median point estimate, consistent with the quantile-based interval below
        thresh_med = np.median(threshold_samps)
        thresh_lower = np.quantile(threshold_samps, q=1 - cred_level)
        thresh_upper = np.quantile(threshold_samps, q=cred_level)

        ax.errorbar(
            thresh_med,
            target_level,
            xerr=np.r_[thresh_med - thresh_lower,
                       thresh_upper - thresh_med][:, None],
            capsize=5,
            elinewidth=1,
            label=f"Est. {target_level*100:.0f}% threshold \n(with {cred_level*100:.0f}% posterior \nmass marked)",
        )

    if true_testfun is not None:
        true_f = norm.cdf(true_testfun(grid))
        ax.plot(grid, true_f.squeeze(), label="True function")
        if target_level is not None:
            true_thresh = interpolate_monotonic(
                grid.squeeze().numpy(),
                true_f.squeeze(),
                target_level,
                strat.lb[0],
                strat.ub[0],
            )

            ax.plot(
                true_thresh.item(),
                target_level,
                "o",
                label=f"True {target_level*100:.0f}% threshold",
            )

    ax.scatter(
        x[y == 0, 0],
        np.zeros_like(x[y == 0, 0]),
        marker=3,
        color="r",
        label="Nondetected trials",
    )
    ax.scatter(
        x[y == 1, 0],
        np.zeros_like(x[y == 1, 0]),
        marker=3,
        color="b",
        label="Detected trials",
    )
    ax.set_xlabel(xlabel)
    ax.set_ylabel("Response Probability")

    ax.set_title(title)
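The threshold estimate above relies on interpolate_monotonic, which inverts a monotone-increasing curve at a target level and clamps the result to the bounds. A one-function NumPy stand-in for that behavior (a sketch under those assumptions, not the library routine):

import numpy as np
from scipy.stats import norm

def interpolate_monotonic_sketch(x, y, target, lb, ub):
    # invert a monotone-increasing curve y(x) at `target`,
    # clamping to [lb, ub] when the target is out of range
    return float(np.clip(np.interp(target, y, x), lb, ub))

# e.g. inverting a probit curve at the 75% level:
x = np.linspace(-3, 3, 301)
print(interpolate_monotonic_sketch(x, norm.cdf(x), 0.75, -3, 3))  # ~0.674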
Code Example #10
def plot_strat_2d(
    strat,
    title,
    ax=None,
    true_testfun=None,
    cred_level=0.95,
    target_level=0.75,
    xlabel="Context (abstract)",
    ylabel="Intensity (abstract)",
    yes_label="Yes trial",
    no_label="No trial",
    flipx=False,
    logx=False,
    gridsize=30,
):

    x, y = strat.x, strat.y

    if ax is None:
        fig, ax = plt.subplots()

    grid = _dim_grid(modelbridge=strat.modelbridge, gridsize=gridsize)
    fmean, _ = strat.modelbridge.predict(grid)
    phimean = norm.cdf(fmean.reshape(gridsize, gridsize).detach().numpy()).T

    if flipx:
        extent = np.r_[strat.lb[0], strat.ub[0], strat.ub[1], strat.lb[1]]
        _ = ax.imshow(phimean,
                      aspect="auto",
                      origin="upper",
                      extent=extent,
                      alpha=0.5)
    else:
        extent = np.r_[strat.lb[0], strat.ub[0], strat.lb[1], strat.ub[1]]
        _ = ax.imshow(phimean,
                      aspect="auto",
                      origin="lower",
                      extent=extent,
                      alpha=0.5)

    # hacky relabel to be in logspace
    if logx:
        locs = np.arange(strat.lb[0], strat.ub[0])
        ax.set_xticks(ticks=locs)
        ax.set_xticklabels(2.0**locs)

    ax.plot(x[y == 0, 0],
            x[y == 0, 1],
            "ro",
            alpha=0.7,
            label="Nondetected trials")
    ax.plot(x[y == 1, 0],
            x[y == 1, 1],
            "bo",
            alpha=0.7,
            label="Detected trials")

    if target_level is not None:  # plot threshold
        mono_grid = np.linspace(strat.lb[1], strat.ub[1], num=gridsize)
        context_grid = np.linspace(strat.lb[0], strat.ub[0], num=gridsize)
        thresh, lower, upper = get_lse_interval(
            modelbridge=strat.modelbridge,
            mono_grid=mono_grid,
            target_level=target_level,
            cred_level=cred_level,
            mono_dim=1,
            n_samps=500,
            lb=mono_grid.min(),
            ub=mono_grid.max(),
            gridsize=gridsize,
        )
        ax.plot(
            context_grid,
            thresh,
            label=f"Est. {target_level*100:.0f}% threshold \n(with {cred_level*100:.0f}% posterior \nmass shaded)",
        )
        ax.fill_between(context_grid,
                        lower,
                        upper,
                        alpha=0.3,
                        hatch="///",
                        edgecolor="gray")

        if true_testfun is not None:
            true_f = true_testfun(grid).reshape(gridsize, gridsize)
            true_thresh = get_lse_contour(norm.cdf(true_f),
                                          mono_grid,
                                          level=target_level,
                                          lb=-1.0,
                                          ub=1.0)
            ax.plot(context_grid, true_thresh, label="Ground truth threshold")

    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)

    ax.set_title(title)
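A typical call pattern for the two plotting helpers (strat_1d and strat_2d are hypothetical fitted strategy objects; only the signatures above are assumed):

import matplotlib.pyplot as plt

fig, axes = plt.subplots(1, 2, figsize=(10, 4))
plot_strat_1d(strat_1d, title="1d detection example", ax=axes[0])
plot_strat_2d(strat_2d, title="2d detection example", ax=axes[1])
fig.tight_layout()
fig.savefig("strat_summary.pdf")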