Example #1
    def test_error(self):
        # An unknown cell type is rejected
        with pytest.raises(ValueError):
            RNN(2, 4, cell_type='FAKE')

        # An odd hidden_size cannot be split across the two directions
        with pytest.raises(ValueError):
            RNN(3, 3, cell_type='LSTM', bidirectional=True)
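The two failures above suggest eager argument validation in the constructor. A minimal sketch of such checks (assumed for illustration; the library's actual source may differ):

    SUPPORTED_CELLS = {'RNN', 'LSTM'}

    def validate_rnn_args(n_channels, hidden_size, cell_type, bidirectional=False):
        # Hypothetical validation consistent with test_error above
        if cell_type not in SUPPORTED_CELLS:
            raise ValueError(f'Unsupported cell_type: {cell_type}')
        if bidirectional and hidden_size % 2 != 0:
            # hidden_size is split evenly across the two directions
            raise ValueError('hidden_size must be even when bidirectional=True')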
Example #2
    def test_n_parameters(self, n_channels, hidden_size, cell_type, bidirectional):
        layer = RNN(n_channels, hidden_size, bidirectional=bidirectional, cell_type=cell_type)

        n_parameters = sum(p.numel() for p in layer.parameters() if p.requires_grad)
        n_dir = 1 + int(bidirectional)
        hidden_size_a = hidden_size // n_dir  # hidden size per direction

        if cell_type == 'RNN':
            # Per direction: input->hidden + hidden->hidden weights + two bias vectors
            assert n_parameters == n_dir * (
                    (n_channels * hidden_size_a) + (hidden_size_a * hidden_size_a) + 2 * hidden_size_a)

        else:
            # An LSTM repeats the same weight/bias layout for each of its 4 gates
            assert n_parameters == n_dir * 4 * (
                    (n_channels * hidden_size_a) + (hidden_size_a * hidden_size_a) + 2 * hidden_size_a)
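The formula can be sanity-checked against torch's built-in recurrent modules: per direction, a vanilla RNN cell stores an input-to-hidden matrix, a hidden-to-hidden matrix, and two bias vectors, and an LSTM repeats that layout for each of its four gates. A standalone check using torch.nn.LSTM (assuming the custom RNN layer wraps these cells, as the test implies):

    import torch

    I, H = 3, 8
    lstm = torch.nn.LSTM(input_size=I, hidden_size=H)
    n_params = sum(p.numel() for p in lstm.parameters() if p.requires_grad)
    # 4 gates, each with input->hidden and hidden->hidden weights plus two biases
    assert n_params == 4 * (I * H + H * H + 2 * H)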
Example #3
    def __init__(
        self,
        n_external,
        n_assets,
        n_channels=5,
        hidden_size=32,
        max_weight=0.15,
        force_symmetric=True,
        p=0.2,
    ):
        super().__init__()
        self.force_symmetric = force_symmetric

        # Directly learnable matrix (initialized to identity) and expected returns vector
        self.matrix = torch.nn.Parameter(torch.eye(n_assets), requires_grad=True)
        self.exp_returns = torch.nn.Parameter(torch.zeros(n_assets), requires_grad=True)

        # Feature extraction: normalization -> RNN -> attention collapse over time
        self.norm_layer = torch.nn.InstanceNorm2d(n_channels, affine=True)
        self.collapse_external = AverageCollapse()
        self.transform_layer = RNN(n_channels, hidden_size=hidden_size)
        self.dropout_layer = torch.nn.Dropout(p=p)
        self.dropout_layer2 = torch.nn.Dropout(p=p)
        self.time_collapse_layer = AttentionCollapse(n_channels=hidden_size)
        self.conv1 = Conv(n_input_channels=hidden_size, n_output_channels=1, method="1D")
        # self.conv2 = Conv(n_input_channels=3, n_output_channels=1, method="1D")
        self.linear_transform = torch.nn.Linear(n_external, n_assets)
        self.linear_2 = torch.nn.Linear(n_external, n_assets)
        self.covariance_layer = CovarianceMatrix(sqrt=False, shrinkage_strategy="diagonal")

        # Learnable scalars feeding the Markowitz allocator
        self.gamma_sqrt = torch.nn.Parameter(torch.ones(1), requires_grad=True)
        self.alpha = torch.nn.Parameter(torch.ones(1), requires_grad=True)
        self.preliminar_weights_layer = NumericalMarkowitz(n_assets, max_weight=max_weight)
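Assuming the enclosing class is a torch.nn.Module subclass named Net (the name is not shown in the snippet), instantiation and a parameter count might look like:

    # Hypothetical usage; the class name and argument values are illustrative
    net = Net(n_external=10, n_assets=30, n_channels=5, hidden_size=32)
    n_params = sum(p.numel() for p in net.parameters() if p.requires_grad)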
Example #4
    def test_basic(self, Xy_dummy, hidden_size, bidirectional, cell_type, n_layers):
        X, _, _, _ = Xy_dummy
        n_samples, n_channels, lookback, n_assets = X.shape

        layer_inst = RNN(n_channels,
                         hidden_size,
                         n_layers=n_layers,
                         bidirectional=bidirectional,
                         cell_type=cell_type)

        layer_inst.to(device=X.device, dtype=X.dtype)

        res = layer_inst(X)

        assert torch.is_tensor(res)
        assert X.ndim == res.ndim
        assert X.device == res.device
        assert X.dtype == res.dtype

        # Only the channel axis changes: n_channels -> hidden_size
        assert X.shape[0] == res.shape[0]
        assert res.shape[1] == hidden_size
        assert X.shape[2:] == res.shape[2:]
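The assertions pin down the layer's shape contract: of the four input axes (n_samples, n_channels, lookback, n_assets), only the channel axis is replaced by hidden_size. A rough, self-contained sketch of how a layer could honor that contract (an assumption for illustration, not the library's implementation):

    import torch

    def apply_rnn(X, rnn):
        # Run an RNN over the lookback axis independently per asset,
        # replacing channels with hidden features
        n_samples, n_channels, lookback, n_assets = X.shape
        x = X.permute(0, 3, 2, 1).reshape(n_samples * n_assets, lookback, n_channels)
        out, _ = rnn(x)  # (n_samples * n_assets, lookback, hidden_size)
        hidden_size = out.shape[-1]
        return out.reshape(n_samples, n_assets, lookback, hidden_size).permute(0, 3, 2, 1)

    X = torch.randn(2, 3, 20, 5)
    rnn = torch.nn.LSTM(input_size=3, hidden_size=16, batch_first=True)
    res = apply_rnn(X, rnn)
    assert res.shape == (2, 16, 20, 5)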