Example #1
def __init__(
    self,
    n_assets,
    force_symmetric=True,
    n_clusters=5,
    n_init=10,
    init="random",
    random_state=None,
    max_weight=0.20,
):
    super().__init__()
    self.force_symmetric = force_symmetric
    # self.matrix = torch.nn.Parameter(torch.eye(n_assets), requires_grad=True)
    # self.exp_returns = torch.nn.Parameter(torch.zeros(n_assets), requires_grad=True)
    self.gamma_sqrt = torch.nn.Parameter(torch.ones(1), requires_grad=True)
    self.alpha = torch.nn.Parameter(torch.ones(1), requires_grad=True)
    # self.n_clusters = torch.nn.Parameter()  # TODO: try to make this differentiable.
    self.matrix = torch.nn.Parameter(torch.eye(n_assets), requires_grad=True)
    self.exp_returns = torch.nn.Parameter(torch.zeros(n_assets), requires_grad=True)
    # self.covariance_layer = CovarianceMatrix(sqrt=False, shrinkage_strategy="diagonal")
    # self.collapse_layer = AverageCollapse(collapse_dim=3)
    self.portfolio_opt_layer = Resample(
        allocator=NumericalMarkowitz(
            n_assets, max_weight=max_weight
        ),  # NCO(n_clusters=n_clusters, n_init=n_init, init=init, random_state=random_state),
        n_draws=10,
        n_portfolios=5,
    )
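For context, the Resample layer configured here can also be called directly, outside a model's forward pass. The snippet below is a minimal standalone sketch: the deepdow.layers import path is an assumption, and the call signature (a batch of covariance matrices plus rets, gamma, and alpha keyword arguments) mirrors the test in Example #2 below.

# Standalone sketch; deepdow.layers import path assumed, kwargs as in Example #2.
import torch
from deepdow.layers import NumericalMarkowitz, Resample

n_samples, n_assets = 4, 10

# Batch of symmetric positive semi-definite covariance matrices.
a = torch.rand(n_samples, n_assets, n_assets)
covmat = a @ a.transpose(1, 2)
rets = torch.rand(n_samples, n_assets)

layer = Resample(
    allocator=NumericalMarkowitz(n_assets, max_weight=0.20),
    n_draws=10,
    n_portfolios=5,
    sqrt=False,  # as in Example #2: the input below is a covariance matrix
)

weights = layer(
    covmat,
    rets=rets,
    gamma=torch.ones(n_samples),  # per-sample scalars NumericalMarkowitz expects
    alpha=torch.ones(n_samples),  # (same shapes as in Example #2)
)
print(weights.shape)  # (n_samples, n_assets)

Inside the module above, the learnable gamma_sqrt and alpha parameters would presumably be expanded to the batch dimension and fed to the layer in the same way during forward().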
Example #2
    def test_basic(self, dtype_device, allocator_class, random_state):
        dtype, device = dtype_device

        n_samples = 2
        n_assets = 3

        single_ = torch.rand(n_assets, n_assets, dtype=dtype, device=device)
        single = single_ @ single_.t()
        covmat = torch.stack([single for _ in range(n_samples)], dim=0)
        rets = torch.rand(n_samples,
                          n_assets,
                          dtype=dtype,
                          device=device,
                          requires_grad=True)

        if allocator_class.__name__ == 'AnalyticalMarkowitz':
            allocator = allocator_class()
            kwargs = {}
        elif allocator_class.__name__ == 'NCO':
            allocator = allocator_class(n_clusters=2)
            kwargs = {}

        elif allocator_class.__name__ == 'NumericalMarkowitz':
            allocator = allocator_class(n_assets=n_assets)
            kwargs = {
                'gamma': torch.ones(n_samples, dtype=dtype, device=device),
                'alpha': torch.ones(n_samples, dtype=dtype, device=device)
            }

        resample_layer = Resample(allocator,
                                  n_portfolios=2,
                                  sqrt=False,
                                  random_state=random_state)

        weights_1 = resample_layer(covmat, rets=rets, **kwargs)
        weights_2 = resample_layer(covmat, rets=rets, **kwargs)

        assert weights_1.shape == (n_samples, n_assets)
        assert weights_1.device == device
        assert weights_1.dtype == dtype

        if random_state is None:
            assert not torch.allclose(weights_1, weights_2)
        else:
            assert torch.allclose(weights_1, weights_2)

        # Make sure one can run backward pass (just sum the weights to get a scalar)
        some_loss = weights_1.sum()

        assert rets.grad is None

        some_loss.backward()

        assert rets.grad is not None
        assert single.grad is None
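Two things are exercised here: random_state controls whether the internal resampling is reproducible (identical weights across the two calls), and the backward pass reaches rets, which was created with requires_grad=True, while the covariance input, built without requires_grad, accumulates no gradient. The arguments dtype_device, allocator_class, and random_state come from pytest parametrization; the fixtures below are only an illustrative stand-in for how they could be supplied. The project's actual conftest may differ, and the deepdow.layers import path is an assumption.

# Illustrative fixtures only; names match the test signature above, values are guesses.
import pytest
import torch
from deepdow.layers import AnalyticalMarkowitz, NCO, NumericalMarkowitz  # assumed import path

@pytest.fixture(params=[(torch.float32, torch.device('cpu')),
                        (torch.float64, torch.device('cpu'))],
                ids=['float32_cpu', 'float64_cpu'])
def dtype_device(request):
    return request.param

@pytest.fixture(params=[AnalyticalMarkowitz, NCO, NumericalMarkowitz])
def allocator_class(request):
    return request.param

@pytest.fixture(params=[None, 3])
def random_state(request):
    return request.param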
Example #3
def test_error(self):
    with pytest.raises(TypeError):
        Resample('wrong_type')
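This test confirms that Resample validates its allocator argument at construction time: anything that is not a supported allocator layer (here a plain string) is rejected with a TypeError immediately, rather than failing later in the forward pass.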