Example #1
    def test_basic(self, Xy_dummy):
        eps = 1e-5
        X, _, _, _ = Xy_dummy
        dtype, device = X.dtype, X.device
        n_samples, n_channels, lookback, n_assets = X.shape

        rets = X.mean(dim=(1, 2))

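        # Passing temperature=None both at construction and at call time is invalid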
        with pytest.raises(ValueError):
            SparsemaxAllocator(n_assets, temperature=None)(rets,
                                                           temperature=None)

        weights = SparsemaxAllocator(n_assets, temperature=2)(rets)

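        # A per-sample temperature tensor filled with 2 should match fixed temperature=2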
        assert torch.allclose(
            weights,
            SparsemaxAllocator(n_assets, temperature=None)(
                rets, 2 * torch.ones(n_samples, dtype=dtype, device=device)))
        assert weights.shape == (n_samples, n_assets)
        assert weights.dtype == X.dtype
        assert weights.device == X.device
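        # Weights stay within [0, 1] (up to eps) and sum to one for each sample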
        assert torch.all(-eps <= weights) and torch.all(weights <= 1 + eps)
        assert torch.allclose(weights.sum(dim=1),
                              torch.ones(n_samples).to(dtype=dtype,
                                                       device=device),
                              atol=eps)
Example #2
    def test_constrained(self, max_weight):
        rets = torch.tensor([[1.7909, -2, -0.6818, -0.4972, 0.0333]])

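        # The max_weight cap should change the allocation and bound the largest weight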
        w_const = SparsemaxAllocator(5, temperature=1, max_weight=max_weight)(rets)
        w_unconst = SparsemaxAllocator(5, temperature=1)(rets)

        assert not torch.allclose(w_const, w_unconst)
        assert w_const.max().item() == pytest.approx(max_weight, abs=1e-5)
Example #3
    def test_known(self):
        rets = torch.tensor([[1.7909, 0.3637, -0.6818, -0.4972, 0.0333],
                             [0.6655, -0.9960, 1.1463, 1.9849, -0.1662]])

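        # Reference allocation: sparsemax assigns exact zeros to the weaker assets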
        true_weights = torch.tensor([[1.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                                     [0.0000, 0.0000, 0.0807, 0.9193, 0.0000]])

        assert torch.allclose(SparsemaxAllocator(5, temperature=1)(rets),
                              true_weights,
                              atol=1e-4)
Example #4
    def test_uniform(self):
        rets = torch.ones(2, 5)
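        # Identical scores for every asset should yield a uniform 1/n_assets allocation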
        weights = SparsemaxAllocator(5, temperature=1)(rets)

        assert torch.allclose(weights, rets / 5)
Example #5
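    # max_weight=0.3 over only 2 assets cannot sum to 1, so construction must raise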
    def test_errors(self):
        with pytest.raises(ValueError):
            SparsemaxAllocator(n_assets=2, max_weight=0.3)
Example #6
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import torch

from deepdow.layers import SoftmaxAllocator, SparsemaxAllocator

# Assumed setup: the snippet uses seed, n_assets and temperatures defined earlier;
# the values below are illustrative placeholders.
seed = 4
n_assets = 10
temperatures = [0.2, 1]
max_weights = [0.2, 0.5, 1]

torch.manual_seed(seed)
logits = torch.rand(size=(1, n_assets)) - 0.5

fig, axs = plt.subplots(len(temperatures),
                        len(max_weights),
                        sharex=True,
                        sharey=True,
                        figsize=(15, 5))
cbar_ax = fig.add_axes([.91, .3, .03, .4])

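# Compare sparsemax and softmax allocations across a grid of temperatures and max weights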
for r, temperature in enumerate(temperatures):
    for c, max_weight in enumerate(max_weights):
        sparsemax = SparsemaxAllocator(n_assets,
                                       max_weight=max_weight,
                                       temperature=temperature)

        softmax = SoftmaxAllocator(n_assets=n_assets,
                                   temperature=temperature,
                                   max_weight=max_weight,
                                   formulation='variational')

        w_sparsemax = sparsemax(logits).detach().numpy()
        w_softmax = softmax(logits).detach().numpy()

        df = pd.DataFrame(np.concatenate([w_softmax, w_sparsemax], axis=0),
                          index=['softmax', 'sparsemax'])

        axs[r, c].set_title('temp={}, max_weight={}'.format(
            temperature, max_weight))
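        # Sketch of the plotting step (assumes seaborn): the snippet builds df and
        # cbar_ax but omits the draw call. One shared colorbar axis keeps the [0, 1]
        # scale comparable across panels.
        sns.heatmap(df,
                    vmin=0,
                    vmax=1,
                    ax=axs[r, c],
                    cbar=(r == 0 and c == 0),
                    cbar_ax=cbar_ax if (r == 0 and c == 0) else None)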