Example #1
    def test_errors(self, dataloader_dummy, network_dummy):
        with pytest.raises(TypeError):
            generate_metrics_table({'bm_1': 'WRONG'}, dataloader_dummy, {'metric': MeanReturns()})

        with pytest.raises(TypeError):
            generate_metrics_table({'bm_1': network_dummy}, 'FAKE', {'metric': MeanReturns()})

        with pytest.raises(TypeError):
            generate_metrics_table({'bm_1': network_dummy}, dataloader_dummy, {'metric': 'FAKE'})
Example #2
    def test_wrong_construction_1(self, dataloader_dummy):
        """Wrong positional arguments."""
        with pytest.raises(TypeError):
            Run('this_is_fake', MeanReturns(), dataloader_dummy)

        with pytest.raises(TypeError):
            Run(DummyNet(), 'this_is_fake', dataloader_dummy)

        with pytest.raises(TypeError):
            Run(DummyNet(), MeanReturns(), 'this_is_fake')
Example #3
    def test_wrong_construction_2(self, dataloader_dummy):
        """Wrong keyword arguments."""
        with pytest.raises(TypeError):
            Run(DummyNet(), MeanReturns(), dataloader_dummy, metrics='this_is_fake')

        with pytest.raises(TypeError):
            Run(DummyNet(), MeanReturns(), dataloader_dummy, metrics={'a': 'this_is_fake'})

        with pytest.raises(ValueError):
            Run(DummyNet(), MeanReturns(), dataloader_dummy, metrics={'loss': MeanReturns()})

        with pytest.raises(TypeError):
            Run(DummyNet(), MeanReturns(), dataloader_dummy, val_dataloaders='this_is_fake')

        with pytest.raises(TypeError):
            Run(DummyNet(), MeanReturns(), dataloader_dummy, val_dataloaders={'val': 'this_is_fake'})

        with pytest.raises(TypeError):
            Run(DummyNet(), MeanReturns(), dataloader_dummy, benchmarks='this_is_fake')

        with pytest.raises(TypeError):
            Run(DummyNet(), MeanReturns(), dataloader_dummy, benchmarks={'uniform': 'this_is_fake'})

        with pytest.raises(ValueError):
            Run(DummyNet(), MeanReturns(), dataloader_dummy, benchmarks={'main': OneOverN()})
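
# A minimal sketch, not deepdow's actual implementation, of the validation
# pattern the tests above imply: wrongly typed arguments raise TypeError,
# while the reserved names 'loss' (metrics) and 'main' (benchmarks) raise
# ValueError.
from deepdow.benchmarks import Benchmark
from deepdow.losses import Loss


def validate_run_kwargs(metrics, benchmarks):
    """Illustrative validation of Run keyword arguments."""
    if not isinstance(metrics, dict):
        raise TypeError('metrics must be a dict')
    for name, metric in metrics.items():
        if name == 'loss':
            raise ValueError("'loss' is reserved for the training loss")
        if not isinstance(metric, Loss):
            raise TypeError('metric {} must be a Loss instance'.format(name))

    if not isinstance(benchmarks, dict):
        raise TypeError('benchmarks must be a dict')
    for name, benchmark in benchmarks.items():
        if name == 'main':
            raise ValueError("'main' is reserved for the trained network")
        if not isinstance(benchmark, Benchmark):
            raise TypeError('benchmark {} must be a Benchmark instance'.format(name))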
Example #4
    def test_basic(self, dataloader_dummy, network_dummy):
        metrics_table = generate_metrics_table({'bm_1': network_dummy},
                                               dataloader_dummy,
                                               {'rets': MeanReturns()})

        assert isinstance(metrics_table, pd.DataFrame)
        assert len(metrics_table) == len(dataloader_dummy.dataset)
        assert {'metric', 'value', 'benchmark', 'timestamp'} == set(metrics_table.columns.to_list())
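
        # The asserted columns show the table is in long format: one row per
        # (benchmark, metric, timestamp) triple. A hypothetical follow-up
        # aggregation using plain pandas:
        per_benchmark = metrics_table.groupby(['benchmark', 'metric'])['value'].mean()
        assert len(per_benchmark) == 1  # a single benchmark and a single metric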
Example #5
    def test_launch_interrupt(self, dataloader_dummy, monkeypatch):
        network = DummyNet(n_channels=dataloader_dummy.dataset.X.shape[1])
        loss = MeanReturns()

        class TempCallback(Callback):
            def on_train_begin(self, metadata):
                raise KeyboardInterrupt()

        monkeypatch.setattr('time.sleep', lambda x: None)
        run = Run(network, loss, dataloader_dummy, callbacks=[TempCallback()])

        run.launch(n_epochs=1)
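
# The test above passes only if Run.launch catches KeyboardInterrupt rather
# than propagating it. A minimal sketch of that pattern (assumed, not
# deepdow's exact code):
def launch_sketch(run_one_epoch, n_epochs):
    try:
        for epoch in range(n_epochs):
            run_one_epoch(epoch)
    except KeyboardInterrupt:
        print('Training interrupted, stopping early')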
Example #6
File: conftest.py Project: zhaoy99/deepdow
def run_dummy(dataloader_dummy, network_dummy, Xy_dummy):
    """"""
    X_batch, y_batch, timestamps, asset_names = Xy_dummy

    device = X_batch.device
    dtype = X_batch.dtype

    return Run(network_dummy,
               MeanReturns(),
               dataloader_dummy,
               val_dataloaders={'val': dataloader_dummy},
               benchmarks={'bm': OneOverN()},
               device=device,
               dtype=dtype)
Example #7
    def test_attributes_after_construction(self, dataloader_dummy, additional_kwargs):
        network = DummyNet()
        loss = MeanReturns()

        kwargs = {}
        if additional_kwargs:
            kwargs.update({'metrics': {'std': StandardDeviation()},
                           'val_dataloaders': {'val': dataloader_dummy},
                           'benchmarks': {'whatever': OneOverN()}})

        run = Run(network, loss, dataloader_dummy, **kwargs)

        assert network is run.network
        assert loss is run.loss
        assert dataloader_dummy is run.train_dataloader
        assert isinstance(run.metrics, dict)
        assert isinstance(run.val_dataloaders, dict)
Example #8
    def test_launch(self, dataloader_dummy):
        network = DummyNet(n_channels=dataloader_dummy.dataset.X.shape[1])
        loss = MeanReturns()
        run = Run(network, loss, dataloader_dummy)

        run.launch(n_epochs=1)
Example #9
dataloader = RigidDataLoader(dataset,
                             indices=list(range(5000)),
                             batch_size=batch_size,
                             lookback=lookback)
val_dataloaders = {
    'train': dataloader,
    'val': RigidDataLoader(dataset,
                           indices=list(range(5020, 9800)),
                           batch_size=batch_size,
                           lookback=lookback)
}

run = Run(network,
          100 * MeanReturns(),
          dataloader,
          val_dataloaders=val_dataloaders,
          metrics={'sqweights': SquaredWeights()},
          benchmarks={
              '1overN': OneOverN(),
              'VAR': VARTrue(process),
              'Random': Random(),
              'InverseVol': InverseVolatility()
          },
          optimizer=torch.optim.Adam(network.parameters(), amsgrad=True),
          callbacks=[EarlyStoppingCallback('val', 'loss')])

history = run.launch(40)

fig, ax = plt.subplots(1, 1)
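
# Plot the monitored validation loss per epoch on the axes created above. That
# history.metrics is a long-format DataFrame with 'dataloader', 'metric',
# 'epoch' and 'value' columns is an assumption about the object returned by
# launch, not something guaranteed by this snippet.
df = history.metrics
val_loss = df[(df['dataloader'] == 'val') & (df['metric'] == 'loss')]
val_loss.groupby('epoch')['value'].mean().plot(ax=ax)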
Example #10
File: iid.py Project: zachmeador/deepdow
X = np.stack(X_list, axis=0)[:, None, ...]
y = np.stack(y_list, axis=0)[:, None, ...]

dataset = InRAMDataset(X, y, asset_names=returns.columns)
dataloader = RigidDataLoader(dataset, batch_size=batch_size)

# %%
# The main feature of :code:`deepdow` is that it only cares about the final allocation that minimizes
# some function of **empirical** portfolio returns. Unlike with the sample estimators in the previous
# sections, one does not need to explicitly model the dynamics of the market and find the allocation
# via the two-step procedure. Below we define empirical counterparts of the convex optimization
# objectives (losses). Note that all losses in :code:`deepdow` follow *the lower the better* logic.

all_losses = {
    'minvar': StandardDeviation()**2,
    'maxret': MeanReturns(),
    'maxutil': MeanReturns() + gamma * StandardDeviation()**2
}
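
# %%
# Each of these losses is a callable that maps a weight allocation and a target
# tensor to a per-sample loss, so the compositions above can be sanity-checked
# directly. The shapes below (2 samples, 1 channel, horizon of 10) are
# illustrative assumptions, not values taken from this script.

import torch

weights_dummy = torch.ones(2, n_assets) / n_assets  # uniform allocation
y_dummy = torch.randn(2, 1, 10, n_assets)  # (n_samples, n_channels, horizon, n_assets)
print(all_losses['maxutil'](weights_dummy, y_dummy).shape)  # torch.Size([2])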

# %%
# Training and evaluation
# ^^^^^^^^^^^^^^^^^^^^^^^
# Now it is time to train!

deep_portfolios_c = {}
deep_portfolios_u = {}

for mode in ['u', 'c']:
    for loss_name, loss in all_losses.items():
        network = Net(n_assets, max_weight=max_weight if mode == 'c' else 1.)
        run = Run(network, loss, dataloader)
Example #11
# dropout, it is essential that we set the mode correctly based on what we are trying to do.
network = network.train()  # train mode is the default; we set it explicitly to make the distinction clear

# %%
# Training
# ^^^^^^^^
# It is now time to define our loss. Let's say we want to achieve multiple objectives at the same
# time. We want to minimize the drawdowns, maximize the mean returns and also maximize the Sharpe
# ratio. All of these losses are implemented in :code:`deepdow.losses`. To avoid confusion, they
# are always implemented in a way that **the lower the value of the loss the better**. To combine
# multiple objectives we can simply sum all of the individual losses. Similarly, if we want to
# assign more importance to one of them we can achieve this by multiplying by a constant. To learn
# more see :ref:`losses`.

loss = MaximumDrawdown() + 2 * MeanReturns() + SharpeRatio()

# %%
# Note that by default all the losses assume that we input logarithmic returns
# (:code:`input_type='log'`) and that they are in the 0th channel (:code:`returns_channel=0`).
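
# %%
# The relation between the two conventions is r_log = log(1 + r_simple), so a
# pipeline that produces simple returns can be converted in one line. The dummy
# array below is purely illustrative.

import numpy as np

simple_returns_dummy = np.array([0.01, -0.02, 0.005])
log_returns_dummy = np.log1p(simple_returns_dummy)  # elementwise log(1 + r)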

# %%
# We now have all the ingredients ready for training the neural network. :code:`deepdow` provides
# a simple wrapper, :code:`Run`, which implements the training loop and a minimal callback
# framework. For further information see :ref:`experiments`.

run = Run(network,
          loss,
          dataloader_train,
          val_dataloaders={'test': dataloader_test},
          optimizer=torch.optim.Adam(network.parameters(), amsgrad=True))