def fitness(self, genes):
        '''
        Cost function of the optimization process
        _________________________________________
        Parameters
            genes: list of 3 numbers
                Ordered algorithm parameters to optimize
                (long_window, ma_rate * 10, threshold)
        _________________________________________
        Returns
            score: float
                Error of the cost function run with this solution
        '''

        # No evolution of the manager (Constant) configuration, so it is read statically from file.
        # Here we only optimize the algorithm parameters.
        engine = Simulation()
        try:
            engine.configure(bt_cfg=self.bt_cfg,
                             a_cfg={'long_window': genes[0], 'ma_rate': float(genes[1] / 10.0), 'threshold': genes[2]},
                             m_cfg=None)
            results = engine.run_backtest()
            risk_metrics = engine.overall_metrics()
        except Exception as error:
            log.error('Exception caught while running cost function: {}'.format(error))
            # Return a penalty score for this candidate
            return 1

        return self.evaluate(results, risk_metrics)
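
For reference, a minimal sketch of how this cost function might be driven by an optimizer. The random-search loop, the parameter ranges and the `optimizer` object exposing `fitness()` are illustrative assumptions, not the project's actual optimizer:

import random

def random_search(optimizer, iterations=50):
    # genes are [long_window, ma_rate * 10, threshold], matching the order
    # expected by fitness() above (ranges here are illustrative only)
    best_genes, best_score = None, float('inf')
    for _ in range(iterations):
        genes = [random.randint(10, 200),      # long_window
                 random.randint(1, 10),        # ma_rate, divided by 10 in fitness()
                 random.uniform(0.0, 1.0)]     # threshold
        score = optimizer.fitness(genes)
        if score < best_score:
            best_genes, best_score = genes, score
    return best_genes, best_score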
Example No. 2
        results = engine.run_backtest()

        # ---------------------------------------------------------------------------------------------    Results   ----
        log.info('Final portfolio value: {}'.format(results.portfolio_value[-1]))

        if args['live'] or results.portfolio_value[-1] == 100000:
            # Tests currently last less than 20 minutes, so the analysis is not relevant; neither is a backtest that placed no orders
            sys.exit(0)

        #TODO Implement in datafeed a generic save method (which could call the correct database save method)
        #NOTE Could do a generic save client method (retrieve the correct model, with correct fields)
        perf_series  = engine.rolling_performances(timestamp='one_month', save=True, db_id=args['database'])
        #TODO save returns not ready yet, don't try to save
        #TODO more benchmarks choice (zipline modification)
        returns_df   = engine.get_returns(benchmark='^GSPC', save=False)
        risk_metrics = engine.overall_metrics(save=True, db_id=args['database'])

        #FIXME irrelevant results if no transactions were made
        log.info('\n\nReturns: {}% / {}%\nVolatility:\t{}\nSharpe:\t\t{}\nMax drawdown:\t{}\n\n'.format(
                 risk_metrics['Returns'] * 100.0,
                 risk_metrics['Benchmark.Returns'] * 100.0,
                 risk_metrics['Volatility'],
                 risk_metrics['Sharpe.Ratio'],
                 risk_metrics['Max.Drawdown']))

        # When running locally, draw a quick summary plot
        if not args['remote']:
            data = returns_df.drop(['Returns', 'Benchmark.Returns'], axis=1)
            data.plot()
            plt.show()
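
These fragments (Examples 2 to 4) start from run_backtest() and assume an already configured engine and an args dictionary. A possible setup, reconstructed from the calls used in these examples (the concrete configuration values are assumptions):

engine = Simulation()
engine.configure(bt_cfg=bt_cfg,       # backtest configuration, assumed to be defined earlier
                 a_cfg={'long_window': 200, 'ma_rate': 0.5, 'threshold': 0.02},
                 m_cfg=None)

args = {'live': False,        # live trading vs. plain backtest
        'remote': False,      # skip the local matplotlib plot when running remotely
        'database': 'test',   # db_id passed to the save=True calls
        'cash': 100000}       # starting capital, compared to the final portfolio value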
Example No. 3
        results = engine.run_backtest()

        # ---------------------------------------------------------------------------------------------    Results   ----
        log.info('Final portfolio value: {}'.format(results.portfolio_value[-1]))

        if args['live'] or results.portfolio_value[-1] == 100000:
            # Tests currently last less than 20 minutes, so the analysis is not relevant; neither is a backtest that placed no orders
            sys.exit(0)

        #TODO Implement in datafeed a generic save method (which could call the correct database save method)
        #NOTE Could do a generic save client method (retrieve the correct model, with correct fields)
        perf_series  = engine.rolling_performances(timestamp='one_month', save=False, db_id='test')
        #TODO save returns not ready yet, don't try to save
        #TODO more benchmarks choice (zipline modification)
        returns_df   = engine.get_returns(benchmark='SP500', save=False)
        risk_metrics = engine.overall_metrics(save=True, db_id='test')

        log.info('\n\nReturns: {}% / {}%\nVolatility:\t{}\nSharpe:\t\t{}\nMax drawdown:\t{}\n\n'.format(
                 risk_metrics['Returns'] * 100.0,
                 risk_metrics['Benchmark.Returns'] * 100.0,
                 risk_metrics['Volatility'],
                 risk_metrics['Sharpe.Ratio'],
                 risk_metrics['Max.Drawdown']))

        # When running locally, draw a quick summary plot
        if not args['remote']:
            data = returns_df.drop(['Returns', 'Benchmark.Returns'], axis=1)
            data.plot()
            plt.show()

            # R statistical analysis
Example No. 4
        results = engine.run_backtest()

        # ---------------------------------------------------------------------------------------------    Results   ----
        log.info('Final portfolio value: {}'.format(results.portfolio_value[-1]))

        if args['live'] or results.portfolio_value[-1] == args['cash']:
            # Tests currently last less than 20 minutes, so the analysis is not relevant; neither is a backtest that placed no orders
            sys.exit(0)

        #TODO Implement in datafeed a generic save method (which could call the correct database save method)
        #NOTE Could do a generic save client method (retrieve the correct model, with correct fields)
        perf_series  = engine.rolling_performances(timestamp='one_month', save=True, db_id=args['database'])
        #TODO save returns not ready yet, don't try to save
        #TODO more benchmarks choice (zipline modification)
        returns_df   = engine.get_returns(benchmark='cac', save=False)
        risk_metrics = engine.overall_metrics(metrics=perf_series, save=True, db_id=args['database'])

        #FIXME irrelevant results if no transactions were made
        log.info('\n\nReturns: {}% / {}%\nVolatility:\t{}\nSharpe:\t\t{}\nMax drawdown:\t{}\n\n'.format(
                 risk_metrics['Returns'] * 100.0,
                 risk_metrics['Benchmark.Returns'] * 100.0,
                 risk_metrics['Volatility'],
                 risk_metrics['Sharpe.Ratio'],
                 risk_metrics['Max.Drawdown']))

        # When running locally, draw a quick summary plot
        if not args['remote']:
            data = returns_df.drop(['Returns', 'Benchmark.Returns'], axis=1)
            data.plot()
            plt.show()
            import ipdb; ipdb.set_trace()