Example #1
def store_tickerdata_file(datadir: Path, pair: str,
                          ticker_interval: str, data: list, is_zip: bool = False):
    """
    Stores tickerdata to file
    """
    filename = pair_data_filename(datadir, pair, ticker_interval)
    misc.file_dump_json(filename, data, is_zip=is_zip)
Example #2
    def _save_result(self, epoch: Dict) -> None:
        """
        Save hyperopt results to file
        Store one line per epoch.
        While the file as a whole is not a valid json object, this allows appending easily.
        :param epoch: result dictionary for this epoch.
        """
        epoch[FTHYPT_FILEVERSION] = 2
        with self.results_file.open('a') as f:
            rapidjson.dump(epoch,
                           f,
                           default=hyperopt_serializer,
                           number_mode=rapidjson.NM_NATIVE | rapidjson.NM_NAN)
            f.write("\n")

        self.num_epochs_saved += 1
        logger.debug(
            f"{self.num_epochs_saved} {plural(self.num_epochs_saved, 'epoch')} "
            f"saved to '{self.results_file}'.")
        # Store hyperopt filename
        latest_filename = Path.joinpath(self.results_file.parent,
                                        LAST_BT_RESULT_FN)
        file_dump_json(latest_filename,
                       {'latest_hyperopt': str(self.results_file.name)},
                       log=False)
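
Because _save_result above writes one JSON object per line (JSON Lines), the results file as a whole is not valid JSON but can be read back line by line. A minimal reader sketch under that assumption; read_epochs is a hypothetical helper, not part of freqtrade, and it assumes python-rapidjson is installed:

import rapidjson

def read_epochs(results_file):
    # Each non-empty line holds one epoch dict, as written by _save_result.
    epochs = []
    with open(results_file) as f:
        for line in f:
            line = line.strip()
            if line:
                epochs.append(rapidjson.loads(line))
    return epochs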
Example #3
def test_file_dump_json() -> None:
    """
    Test file_dump_json()
    :return: None
    """
    file = 'freqtrade/tests/testdata/test_{id}.json'.format(id=str(uuid.uuid4()))
    data = {'bar': 'foo'}

    # check the file we will create does not exist
    assert os.path.isfile(file) is False

    # Create the Json file
    file_dump_json(file, data)

    # Check the file was created
    assert os.path.isfile(file) is True

    # Open the Json file created and test the data is in it
    with open(file) as data_file:
        json_from_file = json.load(data_file)

    assert 'bar' in json_from_file
    assert json_from_file['bar'] == 'foo'

    # Remove the file
    _clean_test_file(file)
Example #4
def store_trades_file(datadir: Path, pair: str,
                      data: list, is_zip: bool = True):
    """
    Stores trades data to file
    """
    filename = pair_trades_filename(datadir, pair)
    misc.file_dump_json(filename, data, is_zip=is_zip)
Example #5
    def trades_store(self, pair: str, data: List[Dict]) -> None:
        """
        Store trades data (list of Dicts) to file
        :param pair: Pair - used for filename
        :param data: List of Dicts containing trade data
        """
        filename = self._pair_trades_filename(self._datadir, pair)
        misc.file_dump_json(filename, data, is_zip=self._use_zip)
Example #6
def download_pair_history(datadir: Optional[Path],
                          exchange: Optional[Exchange],
                          pair: str,
                          ticker_interval: str = '5m',
                          timerange: Optional[TimeRange] = None) -> bool:
    """
    Download the latest ticker intervals from the exchange for the pair passed in parameters
    The data is downloaded starting from the last correct ticker interval data that
    exists in a cache. If timerange starts earlier than the data in the cache,
    the full data will be redownloaded

    Based on @Rybolov work: https://github.com/rybolov/freqtrade-data

    :param pair: pair to download
    :param ticker_interval: ticker interval
    :param timerange: range of time to download
    :return: bool with success state
    """
    if not exchange:
        raise OperationalException(
            "Exchange needs to be initialized when downloading pair history data"
        )

    try:
        filename = pair_data_filename(datadir, pair, ticker_interval)

        logger.info(
            f'Download history data for pair: "{pair}", interval: {ticker_interval} '
            f'and store in {datadir}.')

        data, since_ms = load_cached_data_for_updating(filename,
                                                       ticker_interval,
                                                       timerange)

        logger.debug("Current Start: %s",
                     misc.format_ms_time(data[0][0]) if data else 'None')
        logger.debug("Current End: %s",
                     misc.format_ms_time(data[-1][0]) if data else 'None')

        # Default since_ms to 3 years if nothing is given
        new_data = exchange.get_history(
            pair=pair,
            ticker_interval=ticker_interval,
            since_ms=since_ms if since_ms else
            int(arrow.utcnow().shift(days=-3 * 365).float_timestamp) * 1000)
        data.extend(new_data)

        logger.debug("New Start: %s", misc.format_ms_time(data[0][0]))
        logger.debug("New End: %s", misc.format_ms_time(data[-1][0]))

        misc.file_dump_json(filename, data)
        return True

    except Exception as e:
        logger.error(
            f'Failed to download history data for pair: "{pair}", interval: {ticker_interval}. '
            f'Error: {e}')
        return False
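
The since_ms fallback above converts an arrow timestamp (seconds) into the millisecond epoch format the exchange API expects; int() truncates to whole seconds before the scaling to milliseconds. A standalone illustration of that conversion:

import arrow

# Fallback start time: 3 years back, expressed as milliseconds since the epoch.
since_ms = int(arrow.utcnow().shift(days=-3 * 365).float_timestamp) * 1000
print(since_ms)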
Example #7
    def trades_store(self, pair: str, data: TradeList) -> None:
        """
        Store trades data (list of Dicts) to file
        :param pair: Pair - used for filename
        :param data: List of Lists containing trade data,
                     column sequence as in DEFAULT_TRADES_COLUMNS
        """
        filename = self._pair_trades_filename(self._datadir, pair)
        misc.file_dump_json(filename, data, is_zip=self._use_zip)
Example #8
def test_file_dump_json(mocker) -> None:
    """
    Test file_dump_json()
    :return: None
    """
    file_open = mocker.patch('freqtrade.misc.open', MagicMock())
    json_dump = mocker.patch('json.dump', MagicMock())
    file_dump_json('somefile', [1, 2, 3])
    assert file_open.call_count == 1
    assert json_dump.call_count == 1
Example #9
def test_file_dump_json(mocker) -> None:
    file_open = mocker.patch('freqtrade.misc.open', MagicMock())
    json_dump = mocker.patch('json.dump', MagicMock())
    file_dump_json('somefile', [1, 2, 3])
    assert file_open.call_count == 1
    assert json_dump.call_count == 1
    file_open = mocker.patch('freqtrade.misc.gzip.open', MagicMock())
    json_dump = mocker.patch('json.dump', MagicMock())
    file_dump_json('somefile', [1, 2, 3], True)
    assert file_open.call_count == 1
    assert json_dump.call_count == 1
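
The two mocked paths in this test (freqtrade.misc.open with json.dump by default, freqtrade.misc.gzip.open when the third argument is truthy) suggest the rough shape of file_dump_json. A minimal sketch consistent with the test, not necessarily the actual freqtrade implementation:

import gzip
import json

def file_dump_json(filename, data, is_zip=False):
    # Serialize data as JSON; gzip-compress the output when is_zip is True.
    if is_zip:
        with gzip.open(filename, 'wt', encoding='utf-8') as fp:
            json.dump(data, fp)
    else:
        with open(filename, 'w') as fp:
            json.dump(data, fp)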
Example #10
    def _store_backtest_result(self, recordfilename: Optional[str],
                               results: DataFrame) -> None:

        records = [(trade_entry.pair, trade_entry.profit_percent,
                    trade_entry.open_time.timestamp(),
                    trade_entry.close_time.timestamp(),
                    trade_entry.open_index - 1, trade_entry.trade_duration)
                   for index, trade_entry in results.iterrows()]

        if records:
            logger.info('Dumping backtest results to %s', recordfilename)
            file_dump_json(recordfilename, records)
Example #11
def download_pair_history(datadir: Optional[Path],
                          exchange: Exchange,
                          pair: str,
                          ticker_interval: str = '5m',
                          timerange: Optional[TimeRange] = None) -> bool:
    """
    Download the latest ticker intervals from the exchange for the pair passed in parameters
    The data is downloaded starting from the last correct ticker interval data that
    exists in a cache. If timerange starts earlier than the data in the cache,
    the full data will be redownloaded

    Based on @Rybolov work: https://github.com/rybolov/freqtrade-data
    :param pair: pair to download
    :param ticker_interval: ticker interval
    :param timerange: range of time to download
    :return: bool with success state
    """
    try:
        path = make_testdata_path(datadir)
        filepair = pair.replace("/", "_")
        filename = path.joinpath(f'{filepair}-{ticker_interval}.json')

        logger.info('Download the pair: "%s", Interval: %s', pair,
                    ticker_interval)

        data, since_ms = load_cached_data_for_updating(filename,
                                                       ticker_interval,
                                                       timerange)

        logger.debug("Current Start: %s",
                     misc.format_ms_time(data[0][0]) if data else 'None')
        logger.debug("Current End: %s",
                     misc.format_ms_time(data[-1][0]) if data else 'None')

        # Default since_ms to 30 days if nothing is given
        new_data = exchange.get_history(
            pair=pair,
            ticker_interval=ticker_interval,
            since_ms=since_ms if since_ms else
            int(arrow.utcnow().shift(days=-30).float_timestamp) * 1000)
        data.extend(new_data)

        logger.debug("New Start: %s", misc.format_ms_time(data[0][0]))
        logger.debug("New End: %s", misc.format_ms_time(data[-1][0]))

        misc.file_dump_json(filename, data)
        return True
    except Exception:
        logger.info('Failed to download the pair: "%s", Interval: %s', pair,
                    ticker_interval)
        return False
Example #12
def convert_file(filename: str, filename_new: str) -> None:
    """Converts a file from old format to ccxt format"""
    (pairdata, is_zip) = load_old_file(filename)
    if pairdata and type(pairdata) is list:
        if type(pairdata[0]) is list:
            logger.error("pairdata for %s already in new format", filename)
            return

    frame = parse_old_backtest_data(pairdata)
    # Convert frame to new format
    if frame is not None:
        frame1 = convert_dataframe(frame)
        misc.file_dump_json(filename_new, frame1, is_zip)
Example #13
    def _save_results(self) -> None:
        """
        Save hyperopt results to file
        """
        num_epochs = len(self.epochs)
        if num_epochs > self.num_epochs_saved:
            logger.debug(f"Saving {num_epochs} {plural(num_epochs, 'epoch')}.")
            dump(self.epochs, self.results_file)
            self.num_epochs_saved = num_epochs
            logger.debug(f"{self.num_epochs_saved} {plural(self.num_epochs_saved, 'epoch')} "
                         f"saved to '{self.results_file}'.")
            # Store hyperopt filename
            latest_filename = Path.joinpath(self.results_file.parent, LAST_BT_RESULT_FN)
            file_dump_json(latest_filename, {'latest_hyperopt': str(self.results_file.name)},
                           log=False)
Example #14
    def _store_backtest_result(self, recordfilename: str, results: DataFrame,
                               strategyname: Optional[str] = None) -> None:

        records = [(t.pair, t.profit_percent, t.open_time.timestamp(),
                    t.close_time.timestamp(), t.open_index - 1, t.trade_duration,
                    t.open_rate, t.close_rate, t.open_at_end, t.sell_reason.value)
                   for index, t in results.iterrows()]

        if records:
            if strategyname:
                # Inject strategyname to filename
                recname = Path(recordfilename)
                recordfilename = str(Path.joinpath(
                    recname.parent, f'{recname.stem}-{strategyname}').with_suffix(recname.suffix))
            logger.info('Dumping backtest results to %s', recordfilename)
            file_dump_json(recordfilename, records)
Example #15
def download_backtesting_testdata(datadir: str,
                                  exchange: Exchange,
                                  pair: str,
                                  tick_interval: str = '5m',
                                  timerange: Optional[TimeRange] = None) -> None:
    """
    Download the latest ticker intervals from the exchange for the pairs passed in parameters
    The data is downloaded starting from the last correct ticker interval data that
    exists in a cache. If timerange starts earlier than the data in the cache,
    the full data will be redownloaded

    Based on @Rybolov work: https://github.com/rybolov/freqtrade-data
    :param pair: pair to download
    :param tick_interval: ticker interval
    :param timerange: range of time to download
    :return: None
    """

    path = make_testdata_path(datadir)
    filepair = pair.replace("/", "_")
    filename = os.path.join(path, f'{filepair}-{tick_interval}.json')

    logger.info(
        'Download the pair: "%s", Interval: %s',
        pair,
        tick_interval
    )

    data, since_ms = load_cached_data_for_updating(filename, tick_interval, timerange)

    logger.debug("Current Start: %s", misc.format_ms_time(data[0][0]) if data else 'None')
    logger.debug("Current End: %s", misc.format_ms_time(data[-1][0]) if data else 'None')

    new_data = exchange.get_ticker_history(pair=pair, tick_interval=tick_interval,
                                           since_ms=since_ms)
    data.extend(new_data)

    logger.debug("New Start: %s", misc.format_ms_time(data[0][0]))
    logger.debug("New End: %s", misc.format_ms_time(data[-1][0]))

    misc.file_dump_json(filename, data)
Example #16
def store_backtest_stats(recordfilename: Path, stats: Dict[str, DataFrame]) -> None:
    """
    Stores backtest results
    :param recordfilename: Path object, which can either be a filename or a directory.
        Filenames will be appended with a timestamp right before the suffix
        while for directories, <directory>/backtest-result-<datetime>.json will be used as filename
    :param stats: Dict of Dataframes containing the backtesting statistics
    """
    if recordfilename.is_dir():
        filename = (recordfilename /
                    f'backtest-result-{datetime.now().strftime("%Y-%m-%d_%H-%M-%S")}.json')
    else:
        filename = Path.joinpath(
            recordfilename.parent,
            f'{recordfilename.stem}-{datetime.now().strftime("%Y-%m-%d_%H-%M-%S")}'
            ).with_suffix(recordfilename.suffix)
    file_dump_json(filename, stats)

    latest_filename = Path.joinpath(filename.parent, LAST_BT_RESULT_FN)
    file_dump_json(latest_filename, {'latest_backtest': str(filename.name)})
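
store_backtest_stats therefore accepts either a directory or a file path. Two hypothetical calls illustrating both cases (the stats dict is assumed to be prepared elsewhere):

from pathlib import Path

# Directory: writes <dir>/backtest-result-<timestamp>.json
store_backtest_stats(Path('user_data/backtest_results'), stats)

# File path: the timestamp is inserted before the suffix,
# e.g. my-run-2025-01-01_12-00-00.json
store_backtest_stats(Path('my-run.json'), stats)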
Example #17
def store_backtest_result(recordfilename: Path, all_results: Dict[str, DataFrame]) -> None:
    """
    Stores backtest results to file (one file per strategy)
    :param recordfilename: Destination filename
    :param all_results: Dict of Dataframes, one results dataframe per strategy
    """
    for strategy, results in all_results.items():
        records = [(t.pair, t.profit_percent, t.open_time.timestamp(),
                    t.close_time.timestamp(), t.open_index - 1, t.trade_duration,
                    t.open_rate, t.close_rate, t.open_at_end, t.sell_reason.value)
                   for index, t in results.iterrows()]

        if records:
            if len(all_results) > 1:
                # Inject strategy to filename
                recordfilename = Path.joinpath(
                    recordfilename.parent,
                    f'{recordfilename.stem}-{strategy}').with_suffix(recordfilename.suffix)
            logger.info(f'Dumping backtest results to {recordfilename}')
            file_dump_json(recordfilename, records)
Example #18
def store_backtest_result(recordfilename: Path,
                          all_results: Dict[str, DataFrame]) -> None:
    """
    Stores backtest results to file (one file per strategy)
    :param recordfilename: Destination filename
    :param all_results: Dict of Dataframes, one results dataframe per strategy
    """
    for strategy, results in all_results.items():
        records = backtest_result_to_list(results)

        if records:
            filename = recordfilename
            if len(all_results) > 1:
                # Inject strategy to filename
                filename = Path.joinpath(
                    recordfilename.parent,
                    f'{recordfilename.stem}-{strategy}').with_suffix(
                        recordfilename.suffix)
            logger.info(f'Dumping backtest results to {filename}')
            file_dump_json(filename, records)
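
The strategy-name injection above preserves the original suffix. A small standalone demonstration of that Path manipulation:

from pathlib import Path

recordfilename = Path('backtest-result.json')
strategy = 'MyStrategy'
filename = Path.joinpath(
    recordfilename.parent,
    f'{recordfilename.stem}-{strategy}').with_suffix(recordfilename.suffix)
print(filename)  # backtest-result-MyStrategy.json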
Example #19
def test_file_dump_json_tofile(testdatadir) -> None:
    file = testdatadir / 'test_{id}.json'.format(id=str(uuid.uuid4()))
    data = {'bar': 'foo'}

    # check the file we will create does not exist
    assert not file.is_file()

    # Create the Json file
    file_dump_json(file, data)

    # Check the file was created
    assert file.is_file()

    # Open the Json file created and test the data is in it
    with file.open() as data_file:
        json_from_file = json.load(data_file)

    assert 'bar' in json_from_file
    assert json_from_file['bar'] == 'foo'

    # Remove the file
    _clean_test_file(file)
Example #20
def test_file_dump_json_tofile() -> None:
    file = os.path.join(os.path.dirname(__file__), '..', 'testdata',
                        'test_{id}.json'.format(id=str(uuid.uuid4())))
    data = {'bar': 'foo'}

    # check the file we will create does not exist
    assert os.path.isfile(file) is False

    # Create the Json file
    file_dump_json(file, data)

    # Check the file was created
    assert os.path.isfile(file) is True

    # Open the Json file created and test the data is in it
    with open(file) as data_file:
        json_from_file = json.load(data_file)

    assert 'bar' in json_from_file
    assert json_from_file['bar'] == 'foo'

    # Remove the file
    _clean_test_file(file)
Example #21
def download_backtesting_testdata(datadir: str,
                                  pair: str,
                                  interval: int = 5) -> None:
    """
    Download the latest 1 and 5 ticker intervals from Bittrex for the pairs passed in parameters
    Based on @Rybolov work: https://github.com/rybolov/freqtrade-data
    """

    path = make_testdata_path(datadir)
    logger.info('Download the pair: "%s", Interval: %s min', pair, interval)

    filename = os.path.join(
        path, '{pair}-{interval}.json'.format(
            pair=pair.replace("-", "_"),
            interval=interval,
        ))

    if os.path.isfile(filename):
        with open(filename, "rt") as file:
            data = json.load(file)
    else:
        data = []

    logger.debug('Current Start: %s', data[0]['T'] if data else None)
    logger.debug('Current End: %s', data[-1]['T'] if data else None)

    # Extend data with new ticker history
    data.extend([
        row
        for row in get_ticker_history(pair=pair, tick_interval=int(interval))
        if row not in data
    ])

    data = sorted(data, key=lambda _data: _data['T'])
    logger.debug('New Start: %s', data[0]['T'])
    logger.debug('New End: %s', data[-1]['T'])
    misc.file_dump_json(filename, data)
Example #22
import json
import sys

from freqtrade import exchange, misc
from freqtrade.exchange import Bittrex

parser = misc.common_args_parser('download utility')
parser.add_argument(
        '-p', '--pair',
        help='JSON file containing pairs to download',
        dest='pair',
        default=None
)
args = parser.parse_args(sys.argv[1:])

TICKER_INTERVALS = [1, 5]  # ticker interval in minutes (currently implemented: 1 and 5)
PAIRS = []

if args.pair:
    with open(args.pair) as file:
        PAIRS = json.load(file)
PAIRS = list(set(PAIRS))

print('About to download pairs:', PAIRS)

# Init Bittrex exchange
exchange._API = Bittrex({'key': '', 'secret': ''})

for pair in PAIRS:
    for tick_interval in TICKER_INTERVALS:
        print('downloading pair %s, interval %s' % (pair, tick_interval))
        data = exchange.get_ticker_history(pair, tick_interval)
        filename = '{}-{}.json'.format(pair, tick_interval)
        misc.file_dump_json(filename, data)
Example #23
                        'pair_whitelist': []
                        }
                     })
pairs_not_available = []

for pair in PAIRS:
    if pair not in exchange._api.markets:
        pairs_not_available.append(pair)
        print(f"skipping pair {pair}")
        continue
    for tick_interval in timeframes:
        print(f'downloading pair {pair}, interval {tick_interval}')

        data = exchange.get_ticker_history(pair, tick_interval, since_ms=since_time)
        if not data:
            print('\tNo data was downloaded')
            break

        print('\tData was downloaded for period %s - %s' % (
            arrow.get(data[0][0] / 1000).format(),
            arrow.get(data[-1][0] / 1000).format()))

        # save data
        pair_print = pair.replace('/', '_')
        filename = f'{pair_print}-{tick_interval}.json'
        misc.file_dump_json(os.path.join(dl_path, filename), data)


if pairs_not_available:
    print(f"Pairs [{','.join(pairs_not_available)}] not available.")
Example #24
    def backtest(self, args: Dict) -> DataFrame:
        """
        Implements backtesting functionality

        NOTE: This method is used by Hyperopt at each iteration. Please keep it optimized.
        Try to keep the code clean; some accessors can be slower than plain functions.
        Avoid logging in this method.

        :param args: a dict containing:
            stake_amount: btc amount to use for each trade
            processed: a processed dictionary with format {pair, data}
            max_open_trades: maximum number of concurrent trades (default: 0, disabled)
            realistic: do we try to simulate realistic trades? (default: False)
            sell_profit_only: sell if profit only
            use_sell_signal: act on sell-signal
        :return: DataFrame
        """
        headers = ['date', 'buy', 'open', 'close', 'sell']
        processed = args['processed']
        max_open_trades = args.get('max_open_trades', 0)
        realistic = args.get('realistic', False)
        record = args.get('record', None)
        records = []
        trades = []
        trade_count_lock = {}
        for pair, pair_data in processed.items():
            pair_data['buy'], pair_data['sell'] = 0, 0  # cleanup from previous run

            ticker_data = self.populate_sell_trend(self.populate_buy_trend(pair_data))[headers]
            ticker = [x for x in ticker_data.itertuples()]

            lock_pair_until = None
            for index, row in enumerate(ticker):
                if row.buy == 0 or row.sell == 1:
                    continue  # skip rows where no buy signal or that would immediately sell off

                if realistic:
                    if lock_pair_until is not None and row.date <= lock_pair_until:
                        continue
                if max_open_trades > 0:
                    # Check if max_open_trades has already been reached for the given date
                    if not trade_count_lock.get(row.date, 0) < max_open_trades:
                        continue

                    trade_count_lock[row.date] = trade_count_lock.get(row.date, 0) + 1

                ret = self._get_sell_trade_entry(pair, row, ticker[index + 1:],
                                                 trade_count_lock, args)

                if ret:
                    row2, trade_entry, next_date = ret
                    lock_pair_until = next_date
                    trades.append(trade_entry)
                    if record:
                        # Note, need to be json.dump friendly
                        # record a tuple of pair, current_profit_percent,
                        # entry-date, duration
                        records.append((pair, trade_entry[1],
                                        row.date.strftime('%s'),
                                        row2.date.strftime('%s'),
                                        index, trade_entry[3]))
        # For now export inside backtest(), maybe change so that backtest()
        # returns a tuple like: (dataframe, records, logs, etc)
        if record and record.find('trades') >= 0:
            logger.info('Dumping backtest results')
            file_dump_json('backtest-result.json', records)
        labels = ['currency', 'profit_percent', 'profit_BTC', 'duration']
        return DataFrame.from_records(trades, columns=labels)
Example #25
def store_backtest_result(config, all_data: Dict[str, Dict],
                          all_results: Dict[str, DataFrame]) -> None:
    """
    Stores backtest results to file (one file per strategy)
    :param config: The complete configuration
    :param all_data: A set of all hlcv data and indicators used by the strategies
    :param all_results: Dict of Dataframes, one results dataframe per strategy
    """
    recordfilename = config['exportfilename']
    if config['export'] == 'all':
        for strategy in all_data.items():
            data = {}
            for pair in strategy[1]:
                data[pair] = {}
                data[pair]["trades"] = []
                for index, trade in all_results[strategy[0]].iterrows():
                    if trade.pair == pair:
                        trade_dict = Series.to_dict(trade)
                        trade_dict['open_time'] = trade_dict[
                            'open_time'].timestamp()
                        trade_dict['close_time'] = trade_dict[
                            'close_time'].timestamp()
                        trade_dict['sell_reason'] = trade_dict[
                            'sell_reason'].value
                        data[pair]["trades"].append(trade_dict)

                candles_dict = DataFrame.to_dict(
                    strategy[1][pair][~strategy[1][pair].
                                      isin([np.nan, np.inf, -np.inf]).any(1)],
                    orient='index')
                data[pair]["candles"] = []
                for candle_key in candles_dict.keys():
                    candle_dict = candles_dict[candle_key]
                    candle_dict['date'] = candle_dict['date'].timestamp()
                    data[pair]["candles"].append(candle_dict)
            filename = recordfilename
            if len(all_data) > 1:
                # Inject strategy name into the filename
                filename = Path.joinpath(
                    recordfilename.parent,
                    f'{recordfilename.stem}-{strategy[0]}').with_suffix(
                        recordfilename.suffix)
            logger.info(f'Dumping all of backtest results to {filename}')
            file_dump_json(filename, data)

    else:
        for strategy, results in all_results.items():
            records = [
                (t.pair, t.profit_percent, t.open_time.timestamp(),
                 t.close_time.timestamp(), t.open_index - 1, t.trade_duration,
                 t.open_rate, t.close_rate, t.open_at_end, t.sell_reason.value)
                for index, t in results.iterrows()
            ]

            if records:
                filename = recordfilename
                if len(all_results) > 1:
                    # Inject strategy to filename
                    filename = Path.joinpath(
                        recordfilename.parent,
                        f'{recordfilename.stem}-{strategy}').with_suffix(
                            recordfilename.suffix)
                logger.info(
                    f'Dumping trades of backtest results to {filename}')
                file_dump_json(filename, records)