def fitness(self, genes):
    '''
    Cost function in the optimization process
    _________________________________________
    Parameters
        genes: list(3)
            ordered parameters to optimize
            (long_window, ma_rate tenths, threshold)
    _________________________________________
    Return
        score: float(1)
            Error of the cost function ran with this solution
            (1 is returned as a worst-case penalty when the run fails)
    '''
    # No evolution in manager (constant) configuration, so it is read
    # statically from file — only the algorithm parameters are optimized here.
    engine = Simulation()
    try:
        engine.configure(bt_cfg=self.bt_cfg,
                         a_cfg={'long_window': genes[0],
                                # gene is stored as an integer; scale to a rate
                                'ma_rate': float(genes[1] / 10.0),
                                'threshold': genes[2]},
                         m_cfg=None)
        results = engine.run_backtest()
        risk_metrics = engine.overall_metrics()
    except Exception:
        # Catch only real errors — a bare `except:` would also swallow
        # KeyboardInterrupt/SystemExit, and the pdb.set_trace() breakpoint
        # previously here would hang an unattended optimization run.
        # Penalize this candidate solution with the worst score instead.
        log.error('Exception caught while running cost function')
        return 1
    return self.evaluate(results, risk_metrics)
import sys

from neuronquant.calculus.engine import Simulation
from neuronquant.utils import color_setup, remote_setup, log


if __name__ == '__main__':
    # Use the remote 'setup' configuration for logging
    with remote_setup.applicationbound():
        # ------------------------------------------------------ Backtest ----
        # Backtest or live engine used
        engine = Simulation()

        # Read local (.cfg files and command line args) or remote
        # (ZMQ messages) backtest, algorithm and manager configuration
        args = engine.configure()

        # See neuronquant/calculus/engine.py or zipline for details on
        # the results dataframe
        results = engine.run_backtest()

        # ------------------------------------------------------- Results ----
        log.info('Portfolio returns: {}'.format(results.portfolio_value[-1]))

        if args['live'] or results.portfolio_value[-1] == 100000:
            # Currently tests don't last more than 20min, analysis is not
            # relevant, neither backtest without orders.
            # NOTE: `sys` was used here without being imported — fixed above.
            sys.exit(0)

        #TODO Implement in datafeed a generic save method (which could call the correct database save method)
        #NOTE Could do a generic save client method (retrieve the correct model, with correct fields)
        perf_series = engine.rolling_performances(timestamp='one_month',
                                                  save=False, db_id='test')

        #TODO save returns not ready yet, don't try to save
        #TODO more benchmarks choice (zipline modification)
        returns_df = engine.get_returns(benchmark='SP500', save=False)

        risk_metrics = engine.overall_metrics(save=True, db_id='test')