    def _calculate_conditional_probabilities_jointly(self, y, indices,
                                                     remaining_indices):
        assert len(y) == len(indices), \
            "Observation length does not match number of indices"
        probs = np.zeros([len(remaining_indices), 2])
        remaining_indices_set = set(remaining_indices)
        for k, unseen_index in enumerate(remaining_indices):
            # Order the joint covariance as: observed, candidate, then the rest.
            others = list(remaining_indices_set - {unseen_index})
            joint_indices = indices + [unseen_index] + others
            cov_slice = self.covariance[joint_indices, :][:, joint_indices]
            # Hypothesis 0: candidate positive (+0.5), all other unseen negative.
            probs[k, 0] = norm_pdf(
                np.concatenate([y, [0.5] + [-0.5] * (len(remaining_indices) - 1)]),
                cov_slice)
            # Hypothesis 1: candidate and all other unseen elements negative.
            probs[k, 1] = norm_pdf(
                np.concatenate([y, [-0.5] * len(remaining_indices)]),
                cov_slice)
        # Normalise each row so the two hypotheses sum to one.
        return (probs.T / probs.sum(axis=1)).T
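A note on the snippet above: for each candidate `unseen_index` the joint covariance is reordered to (observed, candidate, remaining), and the two densities compared correspond to "candidate positive (+0.5) with every other unseen element negative (-0.5)" versus "every unseen element negative". Each row therefore conditions on a full joint assignment of the unseen elements, in contrast to `_calculate_conditional_probabilities` further below, which considers one unseen element at a time.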
Example #2
        def plot_stuff(npm_idx):

            index = npm_indices[npm_idx]

            d, nearby_idx, meta = npm.query_around_point(
                kdt, X[index], **kdt_kwds)

            y = Y[nearby_idx]
            ball = X[nearby_idx]

            print(np.max(y))

            inits = npm._get_1d_initialisation_point(y,
                                                     scalar=mu_multiple_scalar,
                                                     bounds=bounds)

            fig, ax = plt.subplots()
            ax.hist(y, density=True, bins=np.linspace(0, 10, 100))  # `normed` was removed in Matplotlib 3

            theta, mu_single, sigma_single, mu_multiple, sigma_multiple = npm_results[
                npm_idx]

            xi = np.linspace(0, 10, 100)
            ax.plot(xi,
                    utils.norm_pdf(xi, mu_single, sigma_single, theta),
                    c='r',
                    lw=2)
            ax.plot(xi,
                    utils.lognorm_pdf(xi, mu_multiple, sigma_multiple, theta),
                    c='g',
                    lw=2)

            ax.plot(xi,
                    utils.lognorm_pdf(xi, mu_multiple, 0.1, theta),
                    c='k',
                    lw=2)

            for init in inits:
                if init == "random": continue

                ax.plot(xi,
                        utils.norm_pdf(xi, init["mu_single"],
                                       init["sigma_single"], init["theta"]),
                        c='r',
                        lw=1)
                ax.plot(xi,
                        utils.lognorm_pdf(xi, init["mu_multiple"],
                                          init["sigma_multiple"],
                                          init["theta"]),
                        c='g',
                        lw=1)
Example #3
    def delta(self, time, instrument, numeraire):
        ttm = self.maturity - time
        forward = instrument * numeraire[-1] / numeraire
        m = tf.math.log(forward / self.strike)
        v = self.volatility * tf.math.sqrt(ttm)
        scale = instrument * v
        raw_delta = norm_pdf(m / v - v / 2.) / scale / numeraire[-1]

        # At expiry (v == 0) raw_delta is ill-defined, so return zero there.
        return tf.where(tf.equal(v, 0.), 0., raw_delta)
    def _calculate_conditional_probabilities(self, y, indices, remaining_indices,
                                             singular=False, intensity=0.5):
        assert len(y) == len(indices), \
            "Observation length does not match number of indices"
        probs = np.zeros([len(remaining_indices), 2])
        # Add diagonal jitter when the covariance slice may be singular.
        diagonal_noise = np.eye(len(indices) + 1) * 1e-6 if singular else 0.0
        for k, unseen_index in enumerate(remaining_indices):
            test_indices = indices + [unseen_index]
            cov_slice = self.covariance[test_indices, :][:, test_indices] + diagonal_noise
            probs[k, 0] = norm_pdf(np.concatenate([y, [intensity]]), cov_slice)
            probs[k, 1] = norm_pdf(np.concatenate([y, [-intensity]]), cov_slice)
        # Normalise each row so the two outcomes sum to one.
        return (probs.T / probs.sum(axis=1)).T
    def posterior_entropy(self,
                          positive=[],
                          negative=[],
                          remaining_indices=[]):

        indices, y = self.observation_vector(positive, negative)
        entropies = np.zeros([len(remaining_indices)])
        remaining_indices_set = set(remaining_indices)

        for k, unseen_index in enumerate(remaining_indices):
            # Suppose we observe this element. What is the new entropy?
            test_indices = indices + [unseen_index]
            cov_slice = self.covariance[test_indices, :][:, test_indices]

            prob_pos = norm_pdf(np.concatenate([y, [0.5]]), cov_slice)
            prob_neg = norm_pdf(np.concatenate([y, [-0.5]]), cov_slice)

            # Marginalize:
            Z = prob_pos + prob_neg
            prob_pos /= Z
            prob_neg /= Z

            # Entropy of the remaining elements under each hypothetical outcome.
            post_indices = list(remaining_indices_set - {unseen_index})
            post_probs_pos = self._calculate_conditional_probabilities(
                np.concatenate([y, [0.5]]), test_indices, post_indices)
            post_probs_neg = self._calculate_conditional_probabilities(
                np.concatenate([y, [-0.5]]), test_indices, post_indices)

            # Expected entropy over the two outcomes:
            entropies[k] = (prob_pos * entropy(post_probs_pos) +
                            prob_neg * entropy(post_probs_neg))
        return (remaining_indices, entropies)
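The Gaussian-process snippets above assume two helpers that are never shown: `norm_pdf(x, cov)`, a multivariate normal density evaluated at `x`, and `entropy(probs)`, the total Shannon entropy of a probability array. A minimal sketch of both, assuming SciPy and a zero-mean process (both are assumptions; the source does not define them):

import numpy as np
from scipy.stats import multivariate_normal

def norm_pdf(x, cov):
    # Zero-mean multivariate normal density at x (assumed mean; not in the source).
    return multivariate_normal(mean=np.zeros(len(x)), cov=cov).pdf(x)

def entropy(probs):
    # Total Shannon entropy of an array of probabilities, guarding against log(0).
    p = np.clip(np.asarray(probs), 1e-12, 1.0)
    return -np.sum(p * np.log(p))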
Example #8
def bachelier_price(time_to_maturity, spot, strike, volatility):
    """Returns price in Bachelier's model.
    Args:
        see black_price
    Returns:
        price: (batch_size, timesteps + 1)
    """
    v = volatility * tf.math.sqrt(time_to_maturity)
    d = (spot - strike) / v
    price = v * (d * utils.norm_cdf(d, approx=True) + utils.norm_pdf(d))

    # due to no interest rate, v=0 implies S_T=S_t a.s.
    return tf.where(tf.equal(v, 0.0), tf.maximum(spot - strike, 0.0), price)
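A quick usage sketch for `bachelier_price`, assuming eager TensorFlow 2.x and that `utils.norm_cdf` / `utils.norm_pdf` are the standard normal CDF/PDF; the inputs below are hypothetical:

import tensorflow as tf

# Two options: one at the money, one out of the money.
time_to_maturity = tf.constant([1.0, 0.25])
spot = tf.constant([100.0, 100.0])
strike = tf.constant([100.0, 110.0])
volatility = tf.constant([15.0, 15.0])  # Bachelier volatility is in price units

price = bachelier_price(time_to_maturity, spot, strike, volatility)
# At the money, d = 0 and the price reduces to v * norm_pdf(0) = v / sqrt(2 * pi).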
Example #10
    def delta(self, time, instrument, numeraire):
        ttm = self.maturity - time
        vol_time = self.volatility * tf.sqrt(ttm)
        d = (instrument - self.strike) / vol_time

        return utils.norm_pdf(d) / vol_time
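Here phi(d) / (volatility * sqrt(ttm)) is the derivative of Phi(d) with respect to the instrument price, so this delta is consistent with a cash-or-nothing (binary) claim under the Bachelier model; that reading is inferred from the formula rather than stated in the source.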
Example #11
        def optimize_mixture_model(index, inits=None, debug=False):

            suppress = config.get("suppress_stan_output", True)

            # Select indices and get data.
            d, nearby_idx, meta = npm.query_around_point(
                kdt, X[index], **kdt_kwds)

            y = Y[nearby_idx]
            ball = X[nearby_idx]

            if inits is None:
                inits = npm._get_1d_initialisation_point(
                    y, scalar=mu_multiple_scalar, bounds=bounds)

            # Update meta dictionary with things about the data.
            meta = dict(max_log_y=np.log(np.max(y)),
                        N=nearby_idx.size,
                        y_percentiles=np.percentile(y, [16, 50, 84]),
                        ball_ptps=np.ptp(ball, axis=0),
                        ball_medians=np.median(ball, axis=0),
                        init_points=inits,
                        kdt_indices=nearby_idx)

            data_dict = dict(y=y, N=y.size, scalar=mu_multiple_scalar)
            data_dict.update(stan_bounds)

            p_opts = []
            ln_probs = []
            for j, init_dict in enumerate(inits):

                opt_kwds = dict(init=init_dict,
                                data=data_dict,
                                as_vector=False)
                opt_kwds.update(default_opt_kwds)

                # Do optimization.
                # TODO: Suppressing output is always dangerous.
                with stan.suppress_output(suppress) as sm:
                    try:
                        p_opt = model.optimizing(**opt_kwds)

                    except Exception:
                        logger.exception(f"Exception occurred when optimizing index {index}"\
                                          f" from {init_dict}:")
                    else:
                        if p_opt is not None:
                            p_opts.append(p_opt["par"])

                            ln_probs.append(
                                utils.ln_prob(
                                    y,
                                    1,
                                    *utils._pack_params(**p_opt["par"]),
                                    bounds=bounds))

                            assert abs(ln_probs[-1] - p_opt["value"]) < 1e-8

                try:
                    p_opt

                except UnboundLocalError:
                    logger.warning("Stan failed. STDOUT & STDERR:")
                    logger.warning("\n".join(sm.outputs))

                else:
                    if p_opt is None:
                        stdout, stderr = sm.outputs
                        logger.warning("Stan only returned p_opt = None")
                        logger.warning(f"STDOUT:\n{stdout}\nSTDERR:\n{stderr}")

            if len(p_opts) < 1:
                logger.warning(f"Optimization on index {index} did not converge"\
                                "from any initial point trialled. Consider "\
                                "relaxing the optimization tolerances! If this "\
                                "occurs regularly then something is very wrong!")

                return (index, None, meta)

            else:
                # evaluate best.
                idx = np.argmax(ln_probs)
                p_opt = p_opts[idx]
                meta["init_idx"] = idx
                """
                # Calculate uncertainties.
                op_bounds = ()
                def nlp(p):
                    w, mu_s, sigma_s, sigma_m = p
                    mu_m = np.log(mu_s + mu_multiple_scalar * sigma_s) + sigma_m**2

                    if not (bounds["theta"][1] >= w >= bounds["theta"][0]) \
                    or not (bounds["mu_single"][1] >= mu_s >= bounds["mu_single"][0]) \
                    or not (bounds["sigma_multiple"][1] >= sigma_m >= bounds["sigma_multiple"][0]):
                        return np.inf

                    return -utils.ln_likelihood(y, w, mu_s, sigma_s, mu_m, sigma_m)


                op_bounds = [bounds["theta"],
                             bounds["mu_single"],
                             bounds["sigma_single"],
                             bounds["sigma_multiple"],
                ]

                #x0 = utils._pack_params(**p_opt)
                x0 = (p_opt["theta"], p_opt["mu_single"], p_opt["sigma_single"], p_opt["sigma_multiple"])
                p_opt2 = op.minimize(nlp, x0, bounds=op_bounds, method="L-BFGS-B")
                """

                # Create a three-panel figure showing:

                # (1) a log-density of the HRD + the selected ball points
                # (2) a log-density of colour vs apparent magnitude + the selected ball points
                # (3) the jitter + fitted parameters

                if sampling:

                    chains = 2  # TODO: move to config file.
                    sampling_kwds = dict(data=opt_kwds["data"],
                                         init=[p_opt] * chains,
                                         chains=chains)
                    try:
                        samples = model.sampling(**sampling_kwds)

                    except Exception:
                        # Sampling failures are non-fatal; continue without chains.
                        pass

                    else:
                        extracted = samples.extract()
                        chains = np.array(
                            [extracted[k] for k in samples.flatnames]).T

                        latex_labels = dict(
                            theta=r"$w$",
                            mu_single=r"$\mu_\mathrm{single}$",
                            sigma_single=r"$\sigma_\mathrm{single}$",
                            mu_multiple=r"$\mu_\mathrm{multiple}$",
                            sigma_multiple=r"$\sigma_\mathrm{multiple}$")

                        corner_fig = corner.corner(
                            chains,
                            labels=[
                                latex_labels[k] for k in samples.flatnames
                            ])

                        source_id = S[index]
                        figure_path = os.path.join(
                            figures_dir,
                            f"{model_name}-{source_id}-samples.png")
                        corner_fig.savefig(figure_path, dpi=150)

                        chains_path = os.path.join(
                            figures_dir,
                            f"{model_name}-{source_id}-chains.pkl")

                        dump = dict(names=samples.flatnames,
                                    chains=chains,
                                    y=y,
                                    ball=ball,
                                    X=X[index])

                        with open(chains_path, "wb") as fp:
                            pickle.dump(dump, fp)

                        plt.close("all")

                if plot_mixture_model_figures:

                    source_id = S[index]

                    figure_path = os.path.join(
                        figures_dir, f"{model_name}-{source_id}.png")

                    x_upper = 2 * config["models"][model_name]["bounds"][
                        "mu_single"][1]
                    bins = np.linspace(0, x_upper, 51)

                    xi = np.linspace(0, x_upper, 1000)
                    y_s = utils.norm_pdf(xi, p_opt["mu_single"],
                                         p_opt["sigma_single"], p_opt["theta"])
                    y_m = utils.lognorm_pdf(xi, p_opt["mu_multiple"],
                                            p_opt["sigma_multiple"],
                                            p_opt["theta"])

                    items_for_deletion = [
                        axes[0].scatter(ball.T[0],
                                        ball.T[1],
                                        c="tab:blue",
                                        s=1,
                                        zorder=10,
                                        alpha=0.5),
                        axes[1].scatter(ball.T[0],
                                        ball.T[2],
                                        c="tab:blue",
                                        s=1,
                                        zorder=10,
                                        alpha=0.5),
                        axes[2].hist(y,
                                     bins=bins,
                                     facecolor="#cccccc",
                                     density=True,
                                     zorder=-1)[-1],
                        axes[2].axvline(Y[index], c="#666666"),
                        axes[2].plot(xi, y_s, c="tab:blue"),
                        axes[2].fill_between(xi,
                                             np.zeros_like(y_s),
                                             y_s,
                                             facecolor="tab:blue",
                                             alpha=0.25),
                        axes[2].plot(xi, y_m, c="tab:red"),
                        axes[2].fill_between(xi,
                                             np.zeros_like(y_m),
                                             y_m,
                                             facecolor="tab:red",
                                             alpha=0.25),
                    ]

                    # Ax limits.

                    axes[0].set_xlim(-0.5, 5)
                    axes[0].set_ylim(10, -15)

                    axes[1].set_xlim(-0.5, 5)
                    axes[1].set_ylim(15, 3)

                    axes[2].set_xlim(0, x_upper)
                    axes[2].set_yticks([])

                    fig.tight_layout()

                    fig.savefig(figure_path, dpi=150)

                    for item in items_for_deletion:
                        try:
                            item.set_visible(False)

                        except AttributeError:
                            for _ in item:
                                if hasattr(_, "set_visible"):
                                    _.set_visible(False)

                if debug:
                    # Halt here so the optimisation state can be inspected.
                    raise RuntimeError(f"Debug stop at index {index}")

                return (index, p_opt, meta)
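Example #11 leans on a `stan.suppress_output` context manager that is not shown. A minimal stand-in matching the usage above (`as sm`, with `sm.outputs` yielding `(stdout, stderr)`), with the caveat that pure-Python redirection will not capture output written by compiled Stan code at the file-descriptor level:

import contextlib
import io

class suppress_output:
    # Hypothetical stand-in for stan.suppress_output: captures stdout/stderr
    # when `suppress` is truthy and exposes the captured text via .outputs.
    def __init__(self, suppress=True):
        self.suppress = suppress
        self._stdout, self._stderr = io.StringIO(), io.StringIO()

    def __enter__(self):
        self._redirects = []
        if self.suppress:
            self._redirects = [contextlib.redirect_stdout(self._stdout),
                               contextlib.redirect_stderr(self._stderr)]
            for r in self._redirects:
                r.__enter__()
        return self

    def __exit__(self, *exc):
        for r in reversed(self._redirects):
            r.__exit__(*exc)
        return False

    @property
    def outputs(self):
        return (self._stdout.getvalue(), self._stderr.getvalue())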
Example #12
    def value(self, time, instrument, numeraire):
        ttm = self.maturity - time
        vol_time = self.volatility * tf.sqrt(ttm)
        d = (instrument - self.strike) / vol_time

        return vol_time * (d * utils.norm_cdf(d) + utils.norm_pdf(d))
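The expression `vol_time * (d * norm_cdf(d) + norm_pdf(d))` is the same Bachelier price as `bachelier_price` in Example #8, written as a method of the instrument; at the money (`instrument == strike`), `d = 0` and the value reduces to `vol_time / sqrt(2 * pi)`.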