def train_model(network, train_dataloader, val_dataloaders, optimizer, callbacks,
                epochs=20, device="cpu", loss_="sharpe"):
    if loss_ == "sharpe":
        loss = SharpeRatio(returns_channel=0)
    else:
        loss = MaximumDrawdown(returns_channel=0)

    benchmarks = {"1overN": OneOverN()}
    metrics = {
        "drawdown": MaximumDrawdown(returns_channel=0),
        "sharpe": SharpeRatio(returns_channel=0),
    }

    run = Run(
        network,
        loss,
        train_dataloader,
        val_dataloaders=val_dataloaders,
        metrics=metrics,
        # benchmarks=benchmarks,
        device=torch.device(device),
        optimizer=optimizer,
        callbacks=callbacks,
    )
    history = run.launch(n_epochs=epochs)

    return run
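# A hedged usage sketch for ``train_model`` above. None of the names below are
# defined in this file; they stand in for a deepdow-compatible network, a training
# dataloader and a dict of validation dataloaders built elsewhere:
#
#     optimizer = torch.optim.Adam(network.parameters(), amsgrad=True)
#     run = train_model(network, train_dl, {"val": val_dl}, optimizer,
#                       callbacks=[], epochs=10, device="cpu", loss_="sharpe")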
def test_wrong_construction_1(self, dataloader_dummy):
    """Wrong positional arguments."""
    with pytest.raises(TypeError):
        Run('this_is_fake', MeanReturns(), dataloader_dummy)

    with pytest.raises(TypeError):
        Run(DummyNet(), 'this_is_fake', dataloader_dummy)

    with pytest.raises(TypeError):
        Run(DummyNet(), MeanReturns(), 'this_is_fake')
def test_launch_interrupt(self, dataloader_dummy, monkeypatch):
    network = DummyNet(n_channels=dataloader_dummy.dataset.X.shape[1])
    loss = MeanReturns()

    class TempCallback(Callback):
        def on_train_begin(self, metadata):
            raise KeyboardInterrupt()

    monkeypatch.setattr('time.sleep', lambda x: None)
    run = Run(network, loss, dataloader_dummy, callbacks=[TempCallback()])

    run.launch(n_epochs=1)
def test_wrong_construction_2(self, dataloader_dummy):
    """Wrong keyword arguments."""
    with pytest.raises(TypeError):
        Run(DummyNet(), MeanReturns(), dataloader_dummy, metrics='this_is_fake')

    with pytest.raises(TypeError):
        Run(DummyNet(), MeanReturns(), dataloader_dummy, metrics={'a': 'this_is_fake'})

    with pytest.raises(ValueError):
        Run(DummyNet(), MeanReturns(), dataloader_dummy, metrics={'loss': MeanReturns()})

    with pytest.raises(TypeError):
        Run(DummyNet(), MeanReturns(), dataloader_dummy, val_dataloaders='this_is_fake')

    with pytest.raises(TypeError):
        Run(DummyNet(), MeanReturns(), dataloader_dummy, val_dataloaders={'val': 'this_is_fake'})

    with pytest.raises(TypeError):
        Run(DummyNet(), MeanReturns(), dataloader_dummy, benchmarks='this_is_fake')

    with pytest.raises(TypeError):
        Run(DummyNet(), MeanReturns(), dataloader_dummy, benchmarks={'uniform': 'this_is_fake'})

    with pytest.raises(ValueError):
        Run(DummyNet(), MeanReturns(), dataloader_dummy, benchmarks={'main': OneOverN()})
def run_dummy(dataloader_dummy, network_dummy, Xy_dummy):
    """Return a dummy ``Run`` matching the device and dtype of the sample batch."""
    X_batch, y_batch, timestamps, asset_names = Xy_dummy
    device = X_batch.device
    dtype = X_batch.dtype

    return Run(network_dummy,
               MeanReturns(),
               dataloader_dummy,
               val_dataloaders={'val': dataloader_dummy},
               benchmarks={'bm': OneOverN()},
               device=device,
               dtype=dtype)
def test_attributes_after_construction(self, dataloader_dummy, additional_kwargs):
    network = DummyNet()
    loss = MeanReturns()

    kwargs = {}
    if additional_kwargs:
        kwargs.update({'metrics': {'std': StandardDeviation()},
                       'val_dataloaders': {'val': dataloader_dummy},
                       'benchmarks': {'whatever': OneOverN()}})

    run = Run(network, loss, dataloader_dummy, **kwargs)

    assert network is run.network
    assert loss is run.loss
    assert dataloader_dummy is run.train_dataloader
    assert isinstance(run.metrics, dict)
    assert isinstance(run.val_dataloaders, dict)
def test_launch(self, dataloader_dummy):
    network = DummyNet(n_channels=dataloader_dummy.dataset.X.shape[1])
    loss = MeanReturns()
    run = Run(network, loss, dataloader_dummy)

    run.launch(n_epochs=1)
val_dataloaders = {
    'train': dataloader,
    'val': RigidDataLoader(dataset,
                           indices=list(range(5020, 9800)),
                           batch_size=batch_size,
                           lookback=lookback)
}

run = Run(network,
          100 * MeanReturns(),
          dataloader,
          val_dataloaders=val_dataloaders,
          metrics={'sqweights': SquaredWeights()},
          benchmarks={'1overN': OneOverN(),
                      'VAR': VARTrue(process),
                      'Random': Random(),
                      'InverseVol': InverseVolatility()},
          optimizer=torch.optim.Adam(network.parameters(), amsgrad=True),
          callbacks=[EarlyStoppingCallback('val', 'loss')])

history = run.launch(40)

fig, ax = plt.subplots(1, 1)
ax.set_title('Validation loss')

per_epoch_results = history.metrics.groupby(
    ['dataloader', 'metric', 'model', 'epoch'])['value'].mean()['val']['loss']

our = per_epoch_results['network']
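# A possible continuation (a sketch, not taken verbatim from the original source):
# plot the network's per-epoch validation loss and, as horizontal reference lines,
# the mean loss levels of the benchmarks registered with ``Run`` above.
our.plot(ax=ax, label='network')
for bm_name, color in [('1overN', 'green'), ('VAR', 'red'),
                       ('Random', 'orange'), ('InverseVol', 'purple')]:
    ax.hlines(y=per_epoch_results[bm_name].mean(), xmin=0, xmax=len(our) - 1,
              color=color, label=bm_name)
ax.set_xlabel('epoch')
ax.legend()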
}

# %%
# Training and evaluation
# ^^^^^^^^^^^^^^^^^^^^^^^
# Now it is time to train!

deep_portfolios_c = {}
deep_portfolios_u = {}

for mode in ['u', 'c']:
    for loss_name, loss in all_losses.items():
        network = Net(n_assets, max_weight=max_weight if mode == 'c' else 1.)
        run = Run(network,
                  loss,
                  dataloader,
                  val_dataloaders={'train': dataloader},
                  callbacks=[EarlyStoppingCallback('train', 'loss', patience=3)])

        run.launch(n_epochs=n_epochs)

        # Results
        w_pred = network(torch.ones(1, n_assets)).detach().numpy().squeeze()  # the input does not matter

        if mode == 'c':
            deep_portfolios_c[loss_name] = w_pred
        else:
            deep_portfolios_u[loss_name] = w_pred
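# %%
# One possible way to inspect the trained portfolios side by side (a sketch, not
# part of the original example; it assumes :code:`pandas` is importable):

import pandas as pd

print(pd.DataFrame(deep_portfolios_c))  # constrained: one column of weights per loss
print(pd.DataFrame(deep_portfolios_u))  # unconstrained: one column of weights per loss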
loss = MaximumDrawdown() + 2 * MeanReturns() + SharpeRatio()

# %%
# Note that by default all the losses assume that the input contains logarithmic returns
# (:code:`input_type='log'`) and that they are located in the 0th channel
# (:code:`returns_channel=0`).

# %%
# We now have all the ingredients ready for training the neural network. :code:`deepdow` provides
# a simple wrapper :code:`Run` that implements the training loop and a minimal callback
# framework. For further information see :ref:`experiments`.

run = Run(network,
          loss,
          dataloader_train,
          val_dataloaders={'test': dataloader_test},
          optimizer=torch.optim.Adam(network.parameters(), amsgrad=True),
          callbacks=[EarlyStoppingCallback(metric_name='loss',
                                           dataloader_name='test',
                                           patience=15)])

# %%
# To run the training loop, we use the :code:`launch` method, specifying the number of epochs.

history = run.launch(30)

# %%
# Evaluation and visualization
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# The :code:`history` object returned by :code:`launch` contains a lot of useful information related
# to training. Specifically, the property :code:`metrics` returns a comprehensive :code:`pd.DataFrame`.
# To display the average test loss per epoch, we can run the following.
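# %%
# A minimal sketch of such a query (not the original snippet; it mirrors the
# grouping used earlier in this document, with 'test' and 'loss' matching the
# dataloader and metric names registered above):

per_epoch_loss = history.metrics.groupby(
    ['dataloader', 'metric', 'model', 'epoch'])['value'].mean()['test']['loss']['network']
print(per_epoch_loss)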