Example #1
from pathlib import Path

from freqtrade.configuration import TimeRange
from freqtrade.data import history
from freqtrade.data.btanalysis import load_trades


def init_plotscript(config):
    """
    Initialize objects needed for plotting
    :return: Dict with tickers, trades and pairs
    """

    if "pairs" in config:
        pairs = config["pairs"]
    else:
        pairs = config["exchange"]["pair_whitelist"]

    # Set timerange to use
    timerange = TimeRange.parse_timerange(config.get("timerange"))

    tickers = history.load_data(
        datadir=Path(str(config.get("datadir"))),
        pairs=pairs,
        timeframe=config.get("ticker_interval", "5m"),
        timerange=timerange,
    )

    trades = load_trades(
        config["trade_source"],
        db_url=config.get("db_url"),
        exportfilename=config.get("exportfilename"),
    )
    trades = history.trim_dataframe(trades, timerange, "open_time")
    return {
        "tickers": tickers,
        "trades": trades,
        "pairs": pairs,
    }
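For orientation, a hypothetical call might look like the sketch below. The config keys mirror exactly what init_plotscript reads above, but every concrete value (paths, pairs, the timerange string) is an invented placeholder, and the sketch assumes the corresponding market data has already been downloaded.

# Hypothetical usage sketch -- keys mirror what init_plotscript reads;
# all concrete values are illustrative placeholders, not from the original.
config = {
    "datadir": "user_data/data/binance",
    "timerange": "20190101-20190301",
    "ticker_interval": "5m",
    "trade_source": "file",
    "exportfilename": "user_data/backtest_results/backtest-result.json",
    "exchange": {"pair_whitelist": ["ETH/BTC", "XRP/BTC"]},
}

plot_data = init_plotscript(config)
print(plot_data["pairs"])           # pairs the data was loaded for
print(plot_data["tickers"].keys())  # one OHLCV dataframe per pair
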
Example #2
    def start(self) -> None:
        data, timerange = self.backtesting.load_bt_data()

        preprocessed = self.backtesting.strategy.tickerdata_to_dataframe(data)

        # Trim startup period from analyzed dataframe
        for pair, df in preprocessed.items():
            preprocessed[pair] = trim_dataframe(df, timerange)
        min_date, max_date = get_timeframe(data)

        logger.info('Hyperopting with data from %s up to %s (%s days)..',
                    min_date.isoformat(), max_date.isoformat(),
                    (max_date - min_date).days)
        dump(preprocessed, self.tickerdata_pickle)

        # We don't need the exchange instance anymore while running hyperopt
        self.backtesting.exchange = None  # type: ignore

        self.load_previous_results()

        cpus = cpu_count()
        logger.info(f"Found {cpus} CPU cores. Let's make them scream!")
        config_jobs = self.config.get('hyperopt_jobs', -1)
        logger.info(f'Number of parallel jobs set as: {config_jobs}')

        self.dimensions = self.hyperopt_space()
        self.opt = self.get_optimizer(self.dimensions, config_jobs)

        if self.config.get('print_colorized', False):
            colorama_init(autoreset=True)

        try:
            with Parallel(n_jobs=config_jobs) as parallel:
                jobs = parallel._effective_n_jobs()
                logger.info(
                    f'Effective number of parallel workers used: {jobs}')
                EVALS = max(self.total_epochs // jobs, 1)
                for i in range(EVALS):
                    asked = self.opt.ask(n_points=jobs)
                    f_val = self.run_optimizer_parallel(parallel, asked, i)
                    self.opt.tell(asked, [v['loss'] for v in f_val])
                    self.fix_optimizer_models_list()
                    for j in range(jobs):
                        # Use human-friendly index here (starting from 1)
                        current = i * jobs + j + 1
                        val = f_val[j]
                        val['current_epoch'] = current
                        val['is_initial_point'] = current <= INITIAL_POINTS
                        logger.debug(f"Optimizer epoch evaluated: {val}")
                        is_best = self.is_best(val)
                        self.log_results(val)
                        self.trials.append(val)
                        if is_best or current % 100 == 0:
                            self.save_trials()
        except KeyboardInterrupt:
            print('User interrupted..')

        self.save_trials(final=True)
        self.log_trials_result()
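The core of the loop above is a batched ask/tell protocol, which matches scikit-optimize's Optimizer API (assumed here, since the import is not shown). A self-contained sketch of just that pattern, with a toy quadratic objective standing in for run_optimizer_parallel, might look like this:

# Minimal ask/tell sketch with scikit-optimize; the quadratic objective
# is a stand-in for the real (parallelized) loss evaluation above.
from skopt import Optimizer
from skopt.space import Real

opt = Optimizer([Real(-5.0, 5.0, name="x")], random_state=1)

jobs = 4                                   # batch size, like the worker count above
for _ in range(10):                        # like the EVALS rounds
    asked = opt.ask(n_points=jobs)         # one candidate per worker
    losses = [x[0] ** 2 for x in asked]    # evaluate the whole batch
    opt.tell(asked, losses)                # report all results back at once

print(f"Best loss so far: {min(opt.yi):.4f}")
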
Example #3
def test_trim_dataframe(testdatadir) -> None:
    data = history.load_data(
        datadir=testdatadir,
        timeframe='1m',
        pairs=['UNITTEST/BTC']
    )['UNITTEST/BTC']
    min_date = int(data.iloc[0]['date'].timestamp())
    max_date = int(data.iloc[-1]['date'].timestamp())
    data_modify = data.copy()

    # Remove first 30 minutes (1800 s)
    tr = TimeRange('date', None, min_date + 1800, 0)
    data_modify = history.trim_dataframe(data_modify, tr)
    assert not data_modify.equals(data)
    assert len(data_modify) < len(data)
    assert len(data_modify) == len(data) - 30
    assert all(data_modify.iloc[-1] == data.iloc[-1])
    assert all(data_modify.iloc[0] == data.iloc[30])

    data_modify = data.copy()
    # Remove last 30 minutes (1800 s)
    tr = TimeRange(None, 'date', 0, max_date - 1800)
    data_modify = history.trim_dataframe(data_modify, tr)
    assert not data_modify.equals(data)
    assert len(data_modify) < len(data)
    assert len(data_modify) == len(data) - 30
    assert all(data_modify.iloc[0] == data.iloc[0])
    assert all(data_modify.iloc[-1] == data.iloc[-31])

    data_modify = data.copy()
    # Remove first 25 minutes (1500 s) and last 30 minutes (1800 s)
    tr = TimeRange('date', 'date', min_date + 1500, max_date - 1800)
    data_modify = history.trim_dataframe(data_modify, tr)
    assert not data_modify.equals(data)
    assert len(data_modify) < len(data)
    assert len(data_modify) == len(data) - 55
    # First row of the trimmed data matches original row 25 (0-indexed)
    assert all(data_modify.iloc[0] == data.iloc[25])
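The boundary semantics the test relies on can be reproduced on synthetic data. The helper below is a toy stand-in for history.trim_dataframe, written here only to illustrate the arithmetic; it is not freqtrade's implementation:

# Toy stand-in for history.trim_dataframe on synthetic 1m candles;
# assumes a tz-aware 'date' column like the test data above.
import pandas as pd

def trim_by_timestamps(df, start_ts=None, stop_ts=None):
    if start_ts is not None:
        df = df[df['date'] >= pd.to_datetime(start_ts, unit='s', utc=True)]
    if stop_ts is not None:
        df = df[df['date'] <= pd.to_datetime(stop_ts, unit='s', utc=True)]
    return df.reset_index(drop=True)

dates = pd.date_range('2019-01-01', periods=120, freq='1min', tz='UTC')
df = pd.DataFrame({'date': dates, 'close': range(120)})
start = int(df.iloc[0]['date'].timestamp()) + 1800   # skip first 30 candles
trimmed = trim_by_timestamps(df, start_ts=start)
assert len(trimmed) == len(df) - 30                  # same arithmetic as the test
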
Example #4
    def start(self) -> None:
        """
        Run a backtesting end-to-end
        :return: None
        """
        data: Dict[str, Any] = {}
        logger.info('Using stake_currency: %s ...',
                    self.config['stake_currency'])
        logger.info('Using stake_amount: %s ...', self.config['stake_amount'])
        # Use max_open_trades in backtesting, unless --disable-max-market-positions is set
        if self.config.get('use_max_market_positions', True):
            max_open_trades = self.config['max_open_trades']
        else:
            logger.info(
                'Ignoring max_open_trades (--disable-max-market-positions was used) ...'
            )
            max_open_trades = 0

        data, timerange = self.load_bt_data()

        all_results = {}
        for strat in self.strategylist:
            logger.info("Running backtesting for Strategy %s",
                        strat.get_strategy_name())
            self._set_strategy(strat)

            # Need to reprocess data every time to populate signals
            preprocessed = self.strategy.tickerdata_to_dataframe(data)

            # Trim startup period from analyzed dataframe
            for pair, df in preprocessed.items():
                preprocessed[pair] = history.trim_dataframe(df, timerange)
            min_date, max_date = history.get_timeframe(preprocessed)

            logger.info('Backtesting with data from %s up to %s (%s days)..',
                        min_date.isoformat(), max_date.isoformat(),
                        (max_date - min_date).days)
            # Execute backtest and print results
            all_results[self.strategy.get_strategy_name()] = self.backtest({
                'stake_amount': self.config.get('stake_amount'),
                'processed': preprocessed,
                'max_open_trades': max_open_trades,
                'position_stacking': self.config.get('position_stacking', False),
                'start_date': min_date,
                'end_date': max_date,
            })

        for strategy, results in all_results.items():

            if self.config.get('export', False):
                self._store_backtest_result(
                    Path(self.config['exportfilename']), results,
                    strategy if len(self.strategylist) > 1 else None)

            print(f"Result for strategy {strategy}")
            print(' BACKTESTING REPORT '.center(133, '='))
            print(self._generate_text_table(data, results))

            print(' SELL REASON STATS '.center(133, '='))
            print(self._generate_text_table_sell_reason(data, results))

            print(' LEFT OPEN TRADES REPORT '.center(133, '='))
            print(self._generate_text_table(
                data, results.loc[results.open_at_end], True))
            print()
        if len(all_results) > 1:
            # Print Strategy summary table
            print(' Strategy Summary '.center(133, '='))
            print(self._generate_text_table_strategy(all_results))
            print('\nFor more details, please look at the detail tables above')
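The _generate_text_table helpers are not shown in this example. A rough sketch of the kind of report table they print, built here with the tabulate package (an assumption; the real formatting code is out of scope, and all rows and figures below are invented):

# Rough sketch of a backtesting report table; column names and numbers
# are invented, and `tabulate` is an assumed dependency here.
from tabulate import tabulate

rows = [
    ("ETH/BTC", 14, 1.27, "0:53:00"),
    ("XRP/BTC", 9, -0.31, "1:12:00"),
]
headers = ["Pair", "Trades", "Avg profit %", "Avg duration"]
print(" BACKTESTING REPORT ".center(60, "="))
print(tabulate(rows, headers=headers, tablefmt="pipe"))
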
Example #5
    def start(self) -> None:
        data, timerange = self.backtesting.load_bt_data()

        preprocessed = self.backtesting.strategy.tickerdata_to_dataframe(data)

        # Trim startup period from analyzed dataframe
        for pair, df in preprocessed.items():
            preprocessed[pair] = trim_dataframe(df, timerange)
        min_date, max_date = get_timeframe(data)

        logger.info('Hyperopting with data from %s up to %s (%s days)..',
                    min_date.isoformat(), max_date.isoformat(),
                    (max_date - min_date).days)
        dump(preprocessed, self.tickerdata_pickle)

        # We don't need the exchange instance anymore while running hyperopt
        self.backtesting.exchange = None  # type: ignore

        self.trials = self.load_previous_results(self.trials_file)

        cpus = cpu_count()
        logger.info(f"Found {cpus} CPU cores. Let's make them scream!")
        config_jobs = self.config.get('hyperopt_jobs', -1)
        logger.info(f'Number of parallel jobs set as: {config_jobs}')

        self.dimensions: List[Dimension] = self.hyperopt_space()
        self.opt = self.get_optimizer(self.dimensions, config_jobs)

        if self.print_colorized:
            colorama_init(autoreset=True)

        try:
            with Parallel(n_jobs=config_jobs) as parallel:
                jobs = parallel._effective_n_jobs()
                logger.info(
                    f'Effective number of parallel workers used: {jobs}')
                EVALS = max(self.total_epochs // jobs, 1)
                for i in range(EVALS):
                    asked = self.opt.ask(n_points=jobs)
                    f_val = self.run_optimizer_parallel(parallel, asked, i)
                    self.opt.tell(asked, [v['loss'] for v in f_val])
                    self.fix_optimizer_models_list()
                    for j in range(jobs):
                        # Use human-friendly indexes here (starting from 1)
                        current = i * jobs + j + 1
                        val = f_val[j]
                        val['current_epoch'] = current
                        val['is_initial_point'] = current <= INITIAL_POINTS
                        logger.debug(f"Optimizer epoch evaluated: {val}")

                        is_best = self.is_best_loss(val,
                                                    self.current_best_loss)
                        # This value is assigned here and not in the optimization method
                        # to keep proper order in the list of results. That's because
                        # evaluations can take different amounts of time. Here they are
                        # aligned in the order they will be shown to the user.
                        val['is_best'] = is_best

                        self.print_results(val)

                        if is_best:
                            self.current_best_loss = val['loss']
                        self.trials.append(val)
                        # Save results after each best epoch and every 100 epochs
                        if is_best or current % 100 == 0:
                            self.save_trials()
        except KeyboardInterrupt:
            print('User interrupted..')

        self.save_trials(final=True)

        if self.trials:
            sorted_trials = sorted(self.trials, key=itemgetter('loss'))
            results = sorted_trials[0]
            self.print_epoch_details(results, self.total_epochs,
                                     self.print_json)
        else:
            # This is printed when Ctrl+C is pressed quickly, before the first
            # epochs have had a chance to be evaluated.
            print("No epochs evaluated yet, no best result.")