def test_launch_interrupt(self, dataloader_dummy, monkeypatch):
    network = DummyNet(n_channels=dataloader_dummy.dataset.X.shape[1])
    loss = MeanReturns()

    class TempCallback(Callback):
        def on_train_begin(self, metadata):
            raise KeyboardInterrupt()

    monkeypatch.setattr('time.sleep', lambda x: None)

    run = Run(network, loss, dataloader_dummy, callbacks=[TempCallback()])
    run.launch(n_epochs=1)
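# The test above raises KeyboardInterrupt from inside a callback hook and does not
# expect the exception to propagate, so launch presumably absorbs the interrupt.
# As an illustration of the same hook mechanism, here is a minimal custom callback;
# the on_epoch_end hook name is an assumption about the Callback interface,
# mirroring the on_train_begin hook used in the test.
class LoggingCallback(Callback):
    def on_epoch_end(self, metadata):
        # metadata is the dict of run state handed to every hook
        print('finished epoch {}'.format(metadata.get('epoch')))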
def train_model(network, train_dataloader, val_dataloaders, optimizer, callbacks,
                epochs=20, device="cpu", loss_="sharpe"):
    if loss_ == "sharpe":
        loss = SharpeRatio(returns_channel=0)
    else:
        loss = MaximumDrawdown(returns_channel=0)

    benchmarks = {"1overN": OneOverN()}  # unused unless the benchmarks kwarg below is re-enabled
    metrics = {
        "drawdown": MaximumDrawdown(returns_channel=0),
        "sharpe": SharpeRatio(returns_channel=0),
    }

    run = Run(
        network,
        loss,
        train_dataloader,
        val_dataloaders=val_dataloaders,
        metrics=metrics,
        # benchmarks=benchmarks,
        device=torch.device(device),
        optimizer=optimizer,
        callbacks=callbacks,
    )
    run.launch(n_epochs=epochs)

    return run
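# Hypothetical usage of train_model: DummyNet and dataloader_dummy below are
# placeholders standing in for whatever network and dataloaders are defined
# elsewhere; only the call shape is the point here.
network = DummyNet(n_channels=4)
optimizer = torch.optim.Adam(network.parameters(), lr=1e-3)
run = train_model(network,
                  train_dataloader=dataloader_dummy,
                  val_dataloaders={'val': dataloader_dummy},
                  optimizer=optimizer,
                  callbacks=[],
                  epochs=5,
                  loss_="drawdown")  # anything other than "sharpe" selects MaximumDrawdown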
def test_launch(self, dataloader_dummy):
    network = DummyNet(n_channels=dataloader_dummy.dataset.X.shape[1])
    loss = MeanReturns()
    run = Run(network, loss, dataloader_dummy)
    run.launch(n_epochs=1)
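# A sketch of what a minimal network like DummyNet might look like: Run only needs
# a torch.nn.Module that maps an input tensor (assuming the usual
# (n_samples, n_channels, lookback, n_assets) layout) to one weight allocation per
# sample. This is an illustration, not the actual DummyNet from the test suite.
import torch

class MinimalNet(torch.nn.Module):
    def __init__(self, n_channels):
        super().__init__()
        self.n_channels = n_channels
        self.temperature = torch.nn.Parameter(torch.ones(1), requires_grad=True)

    def forward(self, x):
        n_samples, n_channels, lookback, n_assets = x.shape
        # Average over the channel and time dimensions, then allocate via softmax
        # so the weights are positive and sum to one for each sample.
        scores = x.mean(dim=[1, 2])  # (n_samples, n_assets)
        return torch.nn.functional.softmax(scores * self.temperature, dim=1)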
run = Run(network,
          100 * MeanReturns(),
          dataloader,
          val_dataloaders=val_dataloaders,
          metrics={'sqweights': SquaredWeights()},
          benchmarks={'1overN': OneOverN(),
                      'VAR': VARTrue(process),
                      'Random': Random(),
                      'InverseVol': InverseVolatility()},
          optimizer=torch.optim.Adam(network.parameters(), amsgrad=True),
          callbacks=[EarlyStoppingCallback('val', 'loss')])

history = run.launch(40)

fig, ax = plt.subplots(1, 1)
ax.set_title('Validation loss')

per_epoch_results = history.metrics.groupby(
    ['dataloader', 'metric', 'model', 'epoch'])['value'].mean()['val']['loss']

our = per_epoch_results['network']
our.plot(ax=ax, label='network')

ax.hlines(y=per_epoch_results['VAR'], xmin=0, xmax=len(our), color='red', label='VAR')
ax.hlines(y=per_epoch_results['1overN'], xmin=0, xmax=len(our), color='green', label='1overN')
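# A possible continuation of the figure above (the colors are arbitrary choices,
# not from the original snippet): draw the remaining benchmark baselines in the
# same style, then label the axes and add a legend so the lines are identifiable.
ax.hlines(y=per_epoch_results['Random'], xmin=0, xmax=len(our), color='orange', label='Random')
ax.hlines(y=per_epoch_results['InverseVol'], xmin=0, xmax=len(our), color='purple', label='InverseVol')
ax.set_xlabel('epoch')
ax.set_ylabel('loss')
ax.legend()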
# Now it is time to train!
deep_portfolios_c = {}
deep_portfolios_u = {}

for mode in ['u', 'c']:
    for loss_name, loss in all_losses.items():
        network = Net(n_assets, max_weight=max_weight if mode == 'c' else 1.)
        run = Run(network,
                  loss,
                  dataloader,
                  val_dataloaders={'train': dataloader},
                  callbacks=[EarlyStoppingCallback('train', 'loss', patience=3)])
        run.launch(n_epochs=n_epochs)

        # Results
        w_pred = network(torch.ones(1, n_assets)).detach().numpy().squeeze()  # the input does not matter

        if mode == 'c':
            deep_portfolios_c[loss_name] = w_pred
        else:
            deep_portfolios_u[loss_name] = w_pred

# %%
# Unconstrained case:
plot_scatter(
    title='Ground truth vs empirical estimates vs deep learning: Unconstrained',
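# %%
# Aside: a hypothetical sanity check on the weights collected in the loop above,
# independent of plot_scatter — tabulate them with one column per loss function
# (assumes pandas is available).
import pandas as pd

print(pd.DataFrame(deep_portfolios_u).round(3))  # unconstrained portfolios
print(pd.DataFrame(deep_portfolios_c).round(3))  # constrained portfolios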
# a simple wrapper :code:`Run` that implements the training loop and a minimal callback
# framework. For further information see :ref:`experiments`.
run = Run(network,
          loss,
          dataloader_train,
          val_dataloaders={'test': dataloader_test},
          optimizer=torch.optim.Adam(network.parameters(), amsgrad=True),
          callbacks=[EarlyStoppingCallback(metric_name='loss',
                                           dataloader_name='test',
                                           patience=15)])

# %%
# To run the training loop, we use the :code:`launch` method, where we specify the number of epochs.
history = run.launch(30)

# %%
# Evaluation and visualization
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# The :code:`history` object returned by :code:`launch` contains a lot of useful information related
# to training. Specifically, the property :code:`metrics` returns a comprehensive :code:`pd.DataFrame`.
# To display the average test loss for each epoch, we can run the following.
per_epoch_results = history.metrics.groupby(
    ['dataloader', 'metric', 'model', 'epoch'])['value']

print(per_epoch_results.count())  # sanity-check the number of samples in each epoch
print(per_epoch_results.mean())  # mean loss per epoch

# %%
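# A minimal sketch of going one step further (the 'test', 'loss', and 'network'
# keys below assume the dataloader name used when constructing the Run above and
# the model name seen in the metrics frame): pull the mean test loss per epoch
# out of the grouped results and plot it.
import matplotlib.pyplot as plt

mean_test_loss = per_epoch_results.mean()['test']['loss']['network']
mean_test_loss.plot()
plt.xlabel('epoch')
plt.ylabel('test loss')
plt.show()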