Example #1
    def loss_critic(params_critic,
                    i,
                    lambda_gp=lambda_gp,
                    batch_size=batch_size):
        y_critic = np.zeros(batch_size)
        # y_critic[:batch_size // 2] = 0.0  # 0 == fake
        y_critic[batch_size // 2:] = 1.0

        rng = check_random_state(i)

        # WGAN loss: simulate a half-batch from the current proposal
        # and stack it with a half-batch of observed data.
        thetas = gaussian_draw(state["params_proposal"],
                               batch_size // 2,
                               random_state=rng)
        _X_gen = np.zeros((batch_size // 2, n_features))
        for j, theta in enumerate(thetas):
            _X_gen[j, :] = simulator(theta, 1, random_state=rng).ravel()

        indices = rng.permutation(len(X_obs))
        _X_obs = X_obs[indices[:batch_size // 2]]
        X = np.vstack([_X_gen, _X_obs])

        y_pred = predict(X, params_critic)
        # Wasserstein critic loss: proportional to E[D(x_gen)] - E[D(x_obs)].
        l_wgan = np.mean(-y_critic * y_pred + (1. - y_critic) * y_pred)

        # Gradient penalty: evaluate the critic's gradient at random
        # interpolates between observed and generated samples and
        # penalize deviations of its norm from 1 (two-sided WGAN-GP).
        eps = rng.rand(batch_size // 2, 1)
        _X_hat = eps * _X_obs + (1. - eps) * _X_gen
        grad_Dx = grad_predict_critic(_X_hat, params_critic)
        norms = np.sum(grad_Dx**2, axis=1)**0.5
        l_gp = np.mean((norms - 1.0)**2.0)

        return l_wgan + lambda_gp * l_gp
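
The penalty term is the two-sided WGAN-GP objective: the critic's gradient is evaluated at random interpolates between observed and generated samples, and deviations of its norm from 1 are penalized. Below is a minimal, self-contained sketch of the same mechanics, assuming a hypothetical linear critic D(x) = x @ w (so that grad_x D(x) = w is available in closed form) and random stand-ins for the simulator output and observed data:

    import numpy as np

    rng = np.random.RandomState(0)
    w = rng.randn(2)  # hypothetical linear critic: D(x) = x @ w

    def grad_predict_linear(X):
        # Gradient of D w.r.t. each input row; constant for a linear critic.
        return np.tile(w, (len(X), 1))

    X_gen = rng.randn(64, 2)        # stand-in for simulated samples
    X_obs = rng.randn(64, 2) + 1.0  # stand-in for observed data

    # Random interpolates between observed and generated samples.
    eps = rng.rand(64, 1)
    X_hat = eps * X_obs + (1. - eps) * X_gen

    # Two-sided penalty: pull the critic's gradient norm toward 1.
    norms = np.sum(grad_predict_linear(X_hat)**2, axis=1)**0.5
    l_gp = np.mean((norms - 1.0)**2)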
Example #2
    def approx_grad_u(params_proposal, i):
        # Monte Carlo estimate of the gradient of the proposal objective:
        # a score-function (REINFORCE) term for E[-D(x)] plus a
        # gamma-weighted entropy term.
        rng = check_random_state(i)
        grad_u = {
            k: np.zeros(len(params_proposal[k]))
            for k in params_proposal
        }
        grad_ent = {
            k: np.zeros(len(params_proposal[k]))
            for k in params_proposal
        }
        thetas = gaussian_draw(params_proposal, batch_size, random_state=rng)

        # For each theta drawn from the proposal, simulate one
        # observation and score it with the current critic.
        for theta in thetas:
            x = simulator(theta, 1, random_state=rng)
            dx = predict(x, state["params_critic"]).ravel()

            grad_q = grad_gaussian_logpdf(params_proposal, theta)
            for k, v in grad_q.items():
                grad_u[k] += -dx * v

        # Closed-form gradient of the Gaussian entropy w.r.t. the
        # proposal parameters.
        grad_entropy = grad_gaussian_entropy(params_proposal)
        for k, v in grad_entropy.items():
            grad_ent[k] += v

        M = len(thetas)

        # Average the score-function term over the M draws and add the
        # entropy term, weighted by gamma.
        for k in grad_u:
            grad_u[k] = 1. / M * grad_u[k] + state["gamma"] * grad_ent[k]

        return grad_u
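
approx_grad_u is a score-function (REINFORCE) estimator: because x comes out of a non-differentiable simulator, the gradient of E_{theta ~ q(.|psi)}[-D(x)] with respect to the proposal parameters psi is estimated as 1/M * sum_i -D(x_i) * grad_psi log q(theta_i | psi), with a gamma-weighted entropy term added on top. A sketch of the same estimator for a one-dimensional Gaussian proposal, with hypothetical critic and simulator stand-ins:

    import numpy as np

    rng = np.random.RandomState(0)
    mu, sigma, gamma = 0.5, 1.0, 0.1
    M = 256

    thetas = rng.normal(mu, sigma, size=M)
    xs = thetas + 0.1 * rng.randn(M)  # stand-in for the simulator
    d = xs**2                         # stand-in for the critic D(x)

    # Score-function estimate of grad E[-D(x)] w.r.t. (mu, sigma), using
    # grad_mu log q = (theta - mu) / sigma^2 and
    # grad_sigma log q = ((theta - mu)^2 - sigma^2) / sigma^3.
    grad_mu = np.mean(-d * (thetas - mu) / sigma**2)
    grad_sigma = np.mean(-d * ((thetas - mu)**2 - sigma**2) / sigma**3)

    # Entropy of N(mu, sigma^2) is 0.5 * log(2*pi*e*sigma^2), so its
    # gradient is 0 w.r.t. mu and 1/sigma w.r.t. sigma.
    grad_sigma += gamma / sigma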
Example #3
    plt.ylabel(r"$G_f$")
    # Map unit-interval parameters onto the physical axis ranges.
    plt.xlim((true_theta[0] - offset) * (50 - 40) + 40,
             (true_theta[0] + offset) * (50 - 40) + 40)
    plt.ylim((true_theta[1] - offset) * (1.5 - 0.5) + 0.5,
             (true_theta[1] + offset) * (1.5 - 0.5) + 0.5)

    plt.legend(loc="lower right")

    # histograms
    ax2 = plt.subplot2grid((2, 2), (0, 1))
    plt.xlim(-1, 1)
    Xs = [X_obs]

    # For each stored snapshot, draw parameters from the proposal and
    # push them through the simulator to histogram the induced data.
    for state in history:
        thetas = gaussian_draw(state["params_proposal"],
                               50000,
                               random_state=rng)
        X_ = np.zeros((len(thetas), 1))
        for j, theta in enumerate(thetas):
            X_[j, :] = simulator(theta, 1).ravel()
        Xs.append(X_)

    plt.hist(Xs,
             histtype="bar",
             label=[r"$x \sim p_r(x)$"] + [
                 r"$x \sim p(x|\psi)\ \gamma=%d$" % state["gamma"]
                 for state in history
             ],
             color=["C0"] + [state["color"] for state in history],
             range=(-1, 1),
             bins=15)
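
plt.hist accepts a list of arrays and draws one bar group per array, which is how the observed data and the per-snapshot proposal samples are overlaid here. The xlim/ylim calls above rescale parameters that live in the unit interval onto physical axis ranges; a hypothetical helper making that affine map explicit (the endpoints 40-50 and 0.5-1.5 are taken from those calls):

    def to_physical(theta, lo, hi):
        # Affine map from the unit interval onto [lo, hi], matching the
        # xlim/ylim rescaling above.
        return theta * (hi - lo) + lo

    # e.g. to_physical(true_theta[0], 40, 50) gives the x-axis center.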