Example 1
    def test_ExpectedShortfall(self):
        alpha = 0.95
        risk_measure = hedge_models.ExpectedShortfall(alpha)

        # For U(0, 1) losses the risk measure is known in closed form:
        # VaR_alpha = alpha and ES_alpha = (1 + alpha) / 2.
        loss = tf.math.sobol_sample(1, 2**19, dtype=FLOAT_DTYPE)
        var = alpha
        risk_measure.w.assign(var)

        result_fitted = risk_measure(-loss)
        result_evaluate = risk_measure.evaluate(-loss)
        expected = tf.constant([(1 + var) / 2.], FLOAT_DTYPE)

        tf.debugging.assert_near(result_fitted, expected, atol=1e-3)
        tf.debugging.assert_near(result_evaluate, expected, atol=1e-3)
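
The closed form used above is easy to check directly: for losses L ~ U(0, 1), VaR_alpha(L) = alpha and ES_alpha(L) = E[L | L >= alpha] = (1 + alpha) / 2. A minimal NumPy sketch, independent of hedge_models, verifying this empirically:

import numpy as np

alpha = 0.95
loss = np.random.default_rng(0).uniform(size=2**19)

# empirical VaR is the alpha-quantile; empirical ES is the mean of the tail
var = np.quantile(loss, alpha)
es = loss[loss >= var].mean()

assert np.isclose(var, alpha, atol=1e-2)
assert np.isclose(es, (1 + alpha) / 2, atol=1e-2)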
Example 2
    def test_cost_gradient(self):
        timesteps = 3
        dimension = 2
        num = 1
        cost = True

        init_instruments, init_numeraire, book = books.random_put_call_book(
            timesteps / 250, dimension * 2, dimension, dimension, num)
        time, instruments, numeraire = book.sample_paths(
            init_instruments, init_numeraire, int(2**10), timesteps, True)

        martingales = instruments / numeraire
        features = tf.unstack(
            book.delta(time, instruments, numeraire) * numeraire,
            axis=-1)[:-1]
        payoff = book.payoff(time, instruments, numeraire)

        model = hedge_models.LinearFeatureHedge(
            timesteps=timesteps,
            instrument_dim=book.instrument_dim,
            # one extra feature map when trading costs are enabled
            mappings=[approximators.IdentityFeatureMap] * (1 + cost))
        model.add_cost(1 / 100)
        model.compile(risk_measure=hedge_models.ExpectedShortfall(0.95))

        with tf.GradientTape() as tape:
            value, costs = model([features, martingales], training=True)
            wealth = value - costs - payoff
            loss = model.risk_measure(wealth)

        # the ES measure carries its own trainable VaR variable w,
        # so it is included alongside the model's weights
        trainable_vars = [model.risk_measure.w] + model.trainable_variables
        gradient_expected = tape.gradient(loss, trainable_vars)
        gradient_result, wealth = model.cost_gradient(
            ([features, martingales, payoff], ))

        for x1, x2 in zip(gradient_result, gradient_expected):
            tf.debugging.assert_near(x1, x2)
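
The pattern above, comparing tape.gradient with the model's own cost_gradient, can be reproduced on a toy problem. A self-contained sketch in plain TensorFlow (no hedge_models needed), checking autodiff against a hand-derived gradient:

import tensorflow as tf

# toy model: loss = mean((w * x - y)^2), whose analytic gradient is
# d(loss)/dw = mean(2 * (w * x - y) * x)
x = tf.random.normal([256])
y = 3.0 * x + tf.random.normal([256], stddev=0.1)
w = tf.Variable(0.5)

with tf.GradientTape() as tape:
    loss = tf.reduce_mean(tf.square(w * x - y))
gradient_autodiff = tape.gradient(loss, w)

gradient_manual = tf.reduce_mean(2.0 * (w * x - y) * x)
tf.debugging.assert_near(gradient_autodiff, gradient_manual)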
Example 3
    cost=1 / 100 if cost else None,
    risk_neutral=True,
    learning_rate=1e-1)
driver.verbose = 2

risklevels = [0.05, 0.5, 0.95] if not cost else [0.95]
for alpha in risklevels:
    driver.add_testcase(f"deep network {alpha}",
                        hedge_models.NeuralHedge(
                            timesteps=timesteps,
                            instrument_dim=book.instrument_dim,
                            internal_dim=0,
                            num_layers=4,
                            num_units=5,
                            activation=tf.keras.activations.softplus),
                        risk_measure=hedge_models.ExpectedShortfall(alpha),
                        normaliser=preprocessing.MeanVarianceNormaliser(),
                        feature_function="log_martingale",
                        price_type="arbitrage")

if driver.cost is not None or not driver.risk_neutral:
    driver.add_liability_free(
        hedge_models.LinearFeatureHedge(
            timesteps=timesteps,
            instrument_dim=book.instrument_dim,
            mappings=[approximators.IdentityFeatureMap]
                * (1 + (driver.cost is not None))),
        # alpha here is the last value left over from the loop above
        risk_measure=hedge_models.ExpectedShortfall(alpha),
        normaliser=preprocessing.MeanVarianceNormaliser(),
        feature_function="log_martingale")
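
The trainable variable w used by ExpectedShortfall throughout these examples matches the Rockafellar-Uryasev representation ES_alpha(L) = min over w of w + E[(L - w)_+] / (1 - alpha), whose minimiser is VaR_alpha(L). A standalone sketch, assuming only TensorFlow, that recovers the uniform closed form by minimising over w:

import tensorflow as tf

alpha = 0.95
loss = tf.random.uniform([2**16])  # losses L ~ U(0, 1)

# hypothetical stand-in for the measure's trainable VaR variable w
w = tf.Variable(0.5)
optimizer = tf.keras.optimizers.SGD(learning_rate=0.05)

for _ in range(500):
    with tf.GradientTape() as tape:
        es = w + tf.reduce_mean(tf.nn.relu(loss - w)) / (1.0 - alpha)
    optimizer.apply_gradients([(tape.gradient(es, w), w)])

# the minimiser tends to VaR_alpha = alpha, the minimum to (1 + alpha) / 2
tf.debugging.assert_near(w, alpha, atol=2e-2)
tf.debugging.assert_near(es, (1.0 + alpha) / 2.0, atol=2e-2)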