Example #1
0
    def test_error(self, run_dummy, metadata_dummy):
        """Unknown dataloader or metric names must raise ``ValueError`` on train begin."""
        first_dataloader = next(iter(run_dummy.val_dataloaders))
        first_metric = next(iter(run_dummy.metrics))

        bad_dataloader_cb = EarlyStoppingCallback(dataloader_name='fake',
                                                  metric_name=first_metric)
        bad_metric_cb = EarlyStoppingCallback(dataloader_name=first_dataloader,
                                              metric_name='fake')

        # Both misconfigured callbacks are rejected as soon as training begins.
        for callback in (bad_dataloader_cb, bad_metric_cb):
            callback.run = run_dummy
            with pytest.raises(ValueError):
                callback.on_train_begin(metadata_dummy)
Example #2
0
    def test_basic(self, run_dummy, metadata_dummy):
        """With ``patience=0`` the callback interrupts training via ``EarlyStoppingException``."""
        val_name = next(iter(run_dummy.val_dataloaders))
        metric = next(iter(run_dummy.metrics))

        early_stopping = EarlyStoppingCallback(dataloader_name=val_name,
                                               metric_name=metric,
                                               patience=0)
        early_stopping.run = run_dummy

        validation = ValidationCallback()
        validation.run = run_dummy

        # Replace any default callbacks so exactly these two are active.
        run_dummy.callbacks = [validation, early_stopping]

        with pytest.raises(EarlyStoppingException):
            for method_name in ALL_METHODS:
                getattr(run_dummy, method_name)(metadata_dummy)

        early_stopping.on_train_interrupt({'exception': EarlyStoppingException()})
Example #3
0
     n_external=len(stocks_df.columns.levels[0]),
     n_assets=len(df.columns.levels[0]),
     max_weight=0.10,
 )
 # Train the network with MLFlow tracking; early stopping watches the 'loss'
 # metric on the 'val' dataloader.
 # NOTE(review): device="cuda" hard-codes a GPU requirement -- confirm before
 # running on a CPU-only machine.
 run = train_model(network,
                   train_dataloader,
                   val_dataloaders,
                   create_optimizer(network, args.optimizer, args.lr),
                   callbacks=[
                       MLFlowCallback(
                           args.run_name,
                           mlflow_path="./mlflow_runs",
                           experiment_name="economistnet",
                           log_benchmarks=True,
                       ),
                       EarlyStoppingCallback("val", "loss"),
                   ],
                   epochs=args.epochs,
                   device="cuda",
                   loss_="maximum_drawdown")
 # Persist the trained run to disk under models_2703/.
 save_network("complete_2703", run, direc="models_2703")
 # Submission template; 'eod_ts' timestamps become the (parsed) index.
 # NOTE(review): absolute user-specific path -- will only resolve on the
 # original author's machine.
 submission_df = pd.read_csv(
     "/home/alejandro.vaca/reto_series_temporales/submission/submission.csv",
     parse_dates=True,
     index_col="eod_ts",
 )
 # future_df = create_synthetic_future_df(candles, submission_df)
 # Build the test stock frame, resample to daily means and forward-fill gaps.
 stocks_test = create_stocks_df()
 stocks_test = stocks_test.resample("D").aggregate("mean").ffill()
 stocks_test = _cut_special(stocks_test,
                            first="2020-05-01",
Example #4
0
                    lookback=lookback)
}

# Assemble the training run: maximise mean returns (scaled by 100) and track
# the network next to several classical benchmark allocators.
benchmark_models = {
    '1overN': OneOverN(),
    'VAR': VARTrue(process),
    'Random': Random(),
    'InverseVol': InverseVolatility()
}

run = Run(network,
          100 * MeanReturns(),
          dataloader,
          val_dataloaders=val_dataloaders,
          metrics={'sqweights': SquaredWeights()},
          benchmarks=benchmark_models,
          optimizer=torch.optim.Adam(network.parameters(), amsgrad=True),
          callbacks=[EarlyStoppingCallback('val', 'loss')])

history = run.launch(40)

fig, ax = plt.subplots(1, 1)
ax.set_title('Validation loss')

# Average recorded values per (dataloader, metric, model, epoch), then pick
# the validation loss series.
recorded_values = history.metrics.groupby(
    ['dataloader', 'metric', 'model', 'epoch'])['value']
per_epoch_results = recorded_values.mean()['val']['loss']
our = per_epoch_results['network']
our.plot(ax=ax, label='network')

ax.hlines(y=per_epoch_results['VAR'],
          xmin=0,
          xmax=len(our),
          color='red',
Example #5
0
# Training and evaluation
# ^^^^^^^^^^^^^^^^^^^^^^^
# Now it is time to train!

deep_portfolios_c = {}
deep_portfolios_u = {}

for mode in ['u', 'c']:
    # Constrained runs ('c') cap each weight at max_weight; unconstrained
    # runs ('u') allow up to 100% in a single asset.
    target = deep_portfolios_c if mode == 'c' else deep_portfolios_u
    cap = max_weight if mode == 'c' else 1.

    for loss_name, loss in all_losses.items():
        network = Net(n_assets, max_weight=cap)
        run = Run(
            network,
            loss,
            dataloader,
            val_dataloaders={'train': dataloader},
            callbacks=[EarlyStoppingCallback('train', 'loss', patience=3)])

        run.launch(n_epochs=n_epochs)

        # Extract the predicted allocation; the input values do not matter
        # for this network, so a tensor of ones suffices.
        w_pred = network(torch.ones(1, n_assets)).detach().numpy().squeeze()
        target[loss_name] = w_pred

# %%
# Unconstrained case:
Example #6
0
# Note that by default all the losses assume that we input logarithmic returns
# (:code:`input_type='log'`) and that they are in the 0th channel (:code:`returns_channel=0`).

# %%
# We now have all the ingredients ready for training of the neural network. :code:`deepdow` implements
# a simple wrapper :code:`Run` that implements the training loop and a minimal callback
# framework. For further information see :ref:`experiments`.

# Wire everything into a Run: network, loss, training data, a held-out test
# dataloader, Adam with AMSGrad, and early stopping on the test loss.
run = Run(
    network,
    loss,
    dataloader_train,
    val_dataloaders={'test': dataloader_test},
    optimizer=torch.optim.Adam(network.parameters(), amsgrad=True),
    callbacks=[
        EarlyStoppingCallback(dataloader_name='test',
                              metric_name='loss',
                              patience=15)
    ])
# %%
# Launch the training loop for the requested number of epochs.
history = run.launch(30)

# %%
# Evaluation and visualization
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# The :code:`history` object returned by :code:`launch` contains a lot of useful information related
# to training. Specifically, the property :code:`metrics` returns a comprehensive :code:`pd.DataFrame`.
# To display the average test loss per each epoch we can run following.

per_epoch_results = history.metrics.groupby(
    ['dataloader', 'metric', 'model', 'epoch'])['value']