Example 1
async def main():
    exchange = sys.argv[1]
    user = sys.argv[2]

    settings_filepath = os.path.join('backtesting_settings', exchange, '')
    backtesting_settings = \
        json.load(open(os.path.join(settings_filepath, 'backtesting_settings.json')))
    symbol = backtesting_settings['symbol']
    n_days = backtesting_settings['n_days']
    ranges = json.load(open(os.path.join(settings_filepath, 'ranges.json')))
    print(settings_filepath)
    results_filepath = make_get_filepath(
        os.path.join(
            'backtesting_results', exchange,
            ts_to_date(time())[:19].replace(':', '_') +
            f'_{int(round(n_days))}', ''))

    trade_cache_filepath = make_get_filepath(
        os.path.join(settings_filepath, 'trade_cache', ''))
    trades_filename = f'{symbol}_raw_trades_{exchange}_{n_days}_days_{ts_to_date(time())[:10]}.npy'
    trades_filepath = f"{trade_cache_filepath}{trades_filename}"
    if os.path.exists(trades_filepath):
        print('loading cached trade list', trades_filepath)
        trades_list = np.load(trades_filepath, allow_pickle=True)
    else:
        agg_trades = await load_trades(exchange, user, symbol, n_days)
        print('preparing trades...')
        trades_list = prep_trades_list(agg_trades)
        np.save(trades_filepath, trades_list)
    jackrabbit(trades_list, backtesting_settings, ranges, results_filepath)
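This entry point reads the exchange and account name from positional command-line arguments, so a plausible invocation (with a hypothetical script name) would be: python backtest.py binance my_user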
Example 2
async def main():
    exchange = sys.argv[1]
    user = sys.argv[2]

    settings_filepath = os.path.join('backtesting_settings', exchange, '')
    backtesting_settings = \
        json.load(open(os.path.join(settings_filepath, 'backtesting_settings.json')))

    try:
        session_name = sys.argv[3]
        print('\n\nusing given session name', session_name, '\n\n')
    except IndexError:
        session_name = backtesting_settings['session_name']
        print('\n\nusing session name from backtesting_settings.json',
              session_name, '\n\n')

    symbol = backtesting_settings['symbol']
    n_days = backtesting_settings['n_days']
    ranges = json.load(open(os.path.join(settings_filepath, 'ranges.json')))
    print(settings_filepath)
    results_filepath = make_get_filepath(
        os.path.join('backtesting_results', exchange, symbol, session_name,
                     ''))
    print(results_filepath)
    trades_list_filepath = os.path.join(
        results_filepath, f"{n_days}_days_trades_list_cache.npy")
    if os.path.exists(trades_list_filepath):
        print('loading cached trade list', trades_list_filepath)
        trades_list = np.load(trades_list_filepath, allow_pickle=True)
    else:
        agg_trades = await load_trades(exchange, user, symbol, n_days)
        print('preparing trades...')
        trades_list = prep_trades_list(agg_trades)
        np.save(trades_list_filepath, trades_list)
    jackrabbit(trades_list, backtesting_settings, ranges, results_filepath)
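This variant accepts an optional third argument selecting the session name, e.g. python backtest.py binance my_user my_session (script name again hypothetical); when it is absent, the session_name from backtesting_settings.json is used.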
Example 3
def plot_wrap(bc, ticks, candidate):
    n_days = round_((ticks[-1][2] - ticks[0][2]) / (1000 * 60 * 60 * 24), 0.1)
    print('backtesting...')
    result, fdf = backtest_wrap(ticks,
                                {**bc, 'break_on': {}, **candidate},
                                do_print=True)
    if fdf is None or len(fdf) == 0:
        print('no trades')
        return
    backtest_config = {**bc, **candidate, **result}
    backtest_config['session_dirpath'] = make_get_filepath(
        os.path.join(
            'plots', bc['exchange'], bc['symbol'],
            f"{n_days}_days_{ts_to_date(time())[:19].replace(':', '')}", ''))
    fdf.to_csv(backtest_config['session_dirpath'] +
               f"backtest_trades_{result['key']}.csv")
    df = pd.DataFrame({
        'price': ticks[:, 0],
        'buyer_maker': ticks[:, 1],
        'timestamp': ticks[:, 2]
    })
    dump_plots(backtest_config, fdf, df)
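Here ticks is a 2-D numpy array whose columns are price, buyer_maker and timestamp, as the DataFrame constructed at the end of the function shows; the timestamps are in milliseconds, hence the division by 1000 * 60 * 60 * 24 to get n_days.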
Example 4
def backtest_tune(ticks: np.ndarray, backtest_config: dict, current_best: Union[dict, list] = None):
    config = create_config(backtest_config)
    n_days = round_((ticks[-1][2] - ticks[0][2]) / (1000 * 60 * 60 * 24), 0.1)
    session_dirpath = make_get_filepath(os.path.join('reports', backtest_config['exchange'], backtest_config['symbol'],
                                                     f"{n_days}_days_{ts_to_date(time())[:19].replace(':', '')}", ''))
    iters = 10
    if 'iters' in backtest_config:
        iters = backtest_config['iters']
    else:
        print('Parameter iters should be defined in the configuration. Defaulting to 10.')
    num_cpus = 2
    if 'num_cpus' in backtest_config:
        num_cpus = backtest_config['num_cpus']
    else:
        print('Parameter num_cpus should be defined in the configuration. Defaulting to 2.')
    n_particles = 10
    if 'n_particles' in backtest_config:
        n_particles = backtest_config['n_particles']
    phi1 = 1.4962
    phi2 = 1.4962
    omega = 0.7298
    if 'options' in backtest_config:
        phi1 = backtest_config['options']['c1']
        phi2 = backtest_config['options']['c2']
        omega = backtest_config['options']['w']
    current_best_params = []
    if current_best:
        if isinstance(current_best, list):
            for c in current_best:
                c = clean_start_config(c, config, backtest_config['ranges'])
                current_best_params.append(c)
        else:
            current_best = clean_start_config(current_best, config, backtest_config['ranges'])
            current_best_params.append(current_best)

    ray.init(num_cpus=num_cpus, logging_level=logging.FATAL, log_to_driver=False)
    pso = ng.optimizers.ConfiguredPSO(transform='identity', popsize=n_particles, omega=omega, phip=phi1, phig=phi2)
    algo = NevergradSearch(optimizer=pso, points_to_evaluate=current_best_params)
    algo = ConcurrencyLimiter(algo, max_concurrent=num_cpus)
    scheduler = AsyncHyperBandScheduler()

    analysis = tune.run(tune.with_parameters(wrap_backtest, ticks=ticks), metric='objective', mode='max', name='search',
                        search_alg=algo, scheduler=scheduler, num_samples=iters, config=config, verbose=1,
                        reuse_actors=True, local_dir=session_dirpath,
                        progress_reporter=LogReporter(
                            metric_columns=['daily_gain',
                                            'closest_liquidation',
                                            'max_hours_between_fills',
                                            'objective'],
                            parameter_columns=[
                                k for k in backtest_config['ranges']
                                if isinstance(config[k], (ray.tune.sample.Float,
                                                          ray.tune.sample.Integer))]))

    ray.shutdown()
    return analysis
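For reference, a minimal sketch of the configuration keys backtest_tune reads, using the function's own fallback values; the exact schema, and the content of 'ranges' in particular, is an assumption:

backtest_config = {
    'exchange': 'binance',   # hypothetical example values
    'symbol': 'BTCUSDT',
    'iters': 10,             # number of tune samples
    'num_cpus': 2,           # ray worker concurrency
    'n_particles': 10,       # PSO swarm size
    'options': {'c1': 1.4962, 'c2': 1.4962, 'w': 0.7298},  # PSO coefficients
    'ranges': {},            # parameter search ranges (schema not shown here)
}

The fallback values phi1 = phi2 = 1.4962 and omega = 0.7298 are the standard Clerc-Kennedy constriction coefficients for particle swarm optimization.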
Example 5
def plot_wrap(bc, ticks, live_config):
    n_days = round_((ticks[-1][2] - ticks[0][2]) / (1000 * 60 * 60 * 24), 0.1)
    print('n_days', n_days)
    config = {**bc, **live_config}
    print('backtesting...')
    fills, stats, did_finish = backtest(config, ticks, do_print=True)
    if not fills:
        print('no fills')
        return
    fdf, result = analyze_fills(fills, config, ticks[-1][2])
    config['result'] = result
    config['plots_dirpath'] = make_get_filepath(os.path.join(
        config['plots_dirpath'], f"{ts_to_date(time())[:19].replace(':', '')}", '')
    )
    fdf.to_csv(config['plots_dirpath'] + "fills.csv")
    df = pd.DataFrame({'price': ticks[:, 0], 'buyer_maker': ticks[:, 1], 'timestamp': ticks[:, 2]})
    dump_plots(config, fdf, df)
Example 6
async def load_trades(exchange: str, user: str, symbol: str,
                      n_days: float) -> pd.DataFrame:
    def skip_ids(id_, ids_):
        if id_ in ids_:
            print('skipping from', id_)
            while id_ in ids_:
                id_ -= 1
            print('           to', id_)
        return id_

    def load_cache():
        cache_filenames = [
            f for f in os.listdir(cache_filepath) if '.csv' in f
        ]
        if cache_filenames:
            print('loading cached trades')
            cache_df = pd.concat(
                [pd.read_csv(cache_filepath + f) for f in cache_filenames],
                axis=0)
            cache_df = cache_df.set_index('trade_id')
            return cache_df
        return None

    if exchange == 'binance':
        fetch_trades_func = binance_fetch_trades
    elif exchange == 'bybit':
        fetch_trades_func = bybit_fetch_trades
    else:
        print(exchange, 'not found')
        return
    cc = init_ccxt(exchange, user)
    filepath = make_get_filepath(
        os.path.join('historical_data', exchange, 'agg_trades_futures', symbol,
                     ''))
    cache_filepath = make_get_filepath(
        filepath.replace(symbol, symbol + '_cache'))
    age_limit = time() - 60 * 60 * 24 * n_days
    age_limit_millis = age_limit * 1000
    print('age_limit', ts_to_date(age_limit))
    cache_df = load_cache()
    trades_df, chunk_lengths = get_downloaded_trades(filepath,
                                                     age_limit_millis)
    ids = set()
    if trades_df is not None:
        ids.update(trades_df.index)
    if cache_df is not None:
        ids.update(cache_df.index)
    gaps = []
    if trades_df is not None and len(trades_df) > 0:
        # find gaps in the sequence of downloaded trade ids
        sids = sorted(ids)
        for i in range(1, len(sids)):
            if sids[i - 1] + 1 != sids[i]:
                gaps.append((sids[i - 1], sids[i]))
        if gaps:
            print('gaps', gaps)
    prev_fetch_ts = time()
    new_trades = await fetch_trades_func(cc, symbol)
    k = 0
    while True:
        k += 1
        # cache fetched trades every 20th iteration; stop once trades older
        # than the age limit have been reached
        if (break_ :=
                new_trades[0]['timestamp'] <= age_limit_millis) or k % 20 == 0:
            print('caching trades...')
            new_tdf = pd.DataFrame(new_trades).set_index('trade_id')
            cache_filename = f'{cache_filepath}{new_tdf.index[0]}_{new_tdf.index[-1]}.csv'
            new_tdf.to_csv(cache_filename)
            new_trades = [new_trades[0]]
            if break_:
                break
        from_id = skip_ids(new_trades[0]['trade_id'] - 1, ids) - 999
        # wait at least 0.75 sec between each fetch
        sleep_for = max(0.0, 0.75 - (time() - prev_fetch_ts))
        await asyncio.sleep(sleep_for)
        prev_fetch_ts = time()
        new_trades = await fetch_trades_func(cc, symbol,
                                             from_id=from_id) + new_trades
        ids.update([e['trade_id'] for e in new_trades])
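The download loop pages backwards through trade history roughly 1000 trades per fetch (hence from_id -= 999), waits at least 0.75 s between requests, caches every 20th batch to disk, and stops once the oldest fetched trade is older than the age limit.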
Example 7
def jackrabbit(trades_list: list, backtesting_settings: dict, ranges: dict,
               base_filepath: str):
    ks = backtesting_settings['n_jackrabbit_iterations']
    k = backtesting_settings['starting_k']
    ms = np.array([1 / (i / 2 + 16) for i in range(ks)])
    ms = ((ms - ms.min()) / (ms.max() - ms.min()))
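    # ms is a monotonically decreasing mutation schedule, normalized so that
    # ms[0] == 1.0 (broad search) and ms[ks - 1] == 0.0 (minimal mutation)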

    best_filepath = base_filepath + 'best.json'

    if backtesting_settings['starting_candidate_preference'][0] == 'best' and \
            os.path.exists(best_filepath):
        best = json.load(open(best_filepath))
        candidate = get_new_candidate(ranges, best, ms[k])
        print('\ncurrent best')
        print(json.dumps(best, indent=4, sort_keys=True))
    else:
        best = {k_: backtesting_settings[k_] for k_ in ranges}
        best['gain'] = -9e9
        if 'given' in backtesting_settings['starting_candidate_preference'][:2]:
            candidate = best.copy()
            print('\nusing starting candidate from backtesting_settings')
        else:
            candidate = get_new_candidate(ranges, best, m=1.0)
            print('\nusing random starting candidate')
        print(json.dumps(candidate, indent=4, sort_keys=True))

    results = {}
    n_days = backtesting_settings['n_days']

    trades_filepath = make_get_filepath(
        os.path.join(base_filepath, 'backtest_trades', ''))
    results_filename = base_filepath + 'results.txt'
    json.dump(backtesting_settings,
              open(base_filepath + 'backtesting_settings.json', 'w'),
              indent=4,
              sort_keys=True)
    json.dump(ranges,
              open(base_filepath + 'ranges.json', 'w'),
              indent=4,
              sort_keys=True)

    while k < ks:
        mutation_coefficient = ms[k]
        if candidate['min_markup'] >= candidate['max_markup']:
            candidate['min_markup'] = candidate['max_markup']

        settings = {**backtesting_settings, **candidate}
        key = np.format_float_positional(
            hash(json.dumps({k_: candidate[k_] for k_ in ranges})),
            trim='-')[1:20]
        if key in results:
            if os.path.exists(best_filepath):
                best = json.load(open(best_filepath))
            candidate = get_new_candidate(ranges, best, mutation_coefficient)
            continue
        print(f'\nk={k}, m={mutation_coefficient:.4f} candidate:\n', candidate)
        start_time = time()
        trades = backtest(trades_list, settings)
        print('\ntime elapsed', round(time() - start_time, 1), 'seconds')
        k += 1
        if not trades:
            print('\nno trades')
            if os.path.exists(best_filepath):
                best = json.load(open(best_filepath))
            candidate = get_new_candidate(ranges, best, mutation_coefficient)
            continue

        tdf = pd.DataFrame(trades).set_index('trade_id')
        closest_liq = ((tdf.price - tdf.liq_price).abs() / tdf.price).min()
        biggest_pos_size = tdf.pos_size.abs().max()
        n_closes = len(tdf[tdf.type == 'close'])
        pnl_sum = tdf.pnl.sum()
        loss_sum = tdf[tdf.type == 'stop_loss'].pnl.sum()
        abs_pos_sizes = tdf.pos_size.abs()
        gain = (pnl_sum +
                settings['starting_balance']) / settings['starting_balance']
        candidate['gain'] = gain
        average_daily_gain = gain**(1 / n_days)
        n_trades = len(tdf)
        result = {
            'n_closes': n_closes,
            'pnl_sum': pnl_sum,
            'loss_sum': loss_sum,
            'average_daily_gain': average_daily_gain,
            'gain': gain,
            'n_trades': n_trades,
            'closest_liq': closest_liq,
            'biggest_pos_size': biggest_pos_size,
            'n_days': n_days,
            'key': key
        }
        tdf.to_csv(f'{trades_filepath}{key}.csv')
        print('\n\n', result)
        results[key] = {**result, **candidate}

        if os.path.exists(best_filepath):
            best = json.load(open(best_filepath))

        if candidate['gain'] > best['gain']:
            best = candidate
            print('\n\n\n###############\nnew best', best,
                  '\naverage daily gain:', round(average_daily_gain, 5), '\n\n')
            print(settings, '\n')
            print(results[key], '\n\n')
            default_live_settings = load_settings(settings['exchange'],
                                                  do_print=False)
            live_settings = {
                k: settings[k] if k in settings else default_live_settings[k]
                for k in default_live_settings
            }
            live_settings['indicator_settings'] = {
                'tick_ema': {
                    'span': best['ema_span']
                },
                'do_long': backtesting_settings['do_long'],
                'do_shrt': backtesting_settings['do_shrt']
            }
            json.dump(live_settings,
                      open(base_filepath + 'best_result_live_settings.json',
                           'w'),
                      indent=4,
                      sort_keys=True)
            print('\n\n', json.dumps(live_settings, indent=4, sort_keys=True),
                  '\n\n')
            json.dump({'gain': result['gain'], **best},
                      open(best_filepath, 'w'),
                      indent=4,
                      sort_keys=True)
        candidate = get_new_candidate(ranges, best, m=mutation_coefficient)
        with open(results_filename, 'a') as f:
            f.write(json.dumps(results[key]) + '\n')
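get_new_candidate and calc_new_val are not shown in any of these examples. Below is a minimal sketch of what such a mutation helper could look like, assuming each entry of ranges is a (min, max) pair and m scales the mutation width; this is an illustration only, not the project's actual implementation:

import numpy as np

def calc_new_val(val: float, rng: tuple, m: float) -> float:
    # nudge val by up to +/- half of m times the range width, clamped to [min, max]
    new_val = val + (np.random.random() - 0.5) * (rng[1] - rng[0]) * m
    return min(max(new_val, rng[0]), rng[1])

def get_new_candidate(ranges: dict, best: dict, m: float = 0.2) -> dict:
    # mutate every tunable parameter of the current best candidate
    return {key: calc_new_val(best[key], ranges[key], m) for key in sorted(ranges)}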
Example 8
async def main():
    exchange = sys.argv[1]
    user = sys.argv[2]

    settings_filepath = os.path.join('backtesting_settings', exchange, '')
    backtesting_settings = \
        json.load(open(os.path.join(settings_filepath, 'backtesting_settings.json')))

    try:
        session_name = sys.argv[3]
        print('\n\nusing given session name', session_name, '\n\n')
    except IndexError:
        session_name = backtesting_settings['session_name']
        print('\n\nusing session name from backtesting_settings.json',
              session_name, '\n\n')

    symbol = backtesting_settings['symbol']
    n_days = backtesting_settings['n_days']
    ranges = json.load(open(os.path.join(settings_filepath, 'ranges.json')))
    results_filepath = make_get_filepath(
        os.path.join('backtesting_results', exchange, symbol, session_name,
                     ''))
    print(results_filepath)

    settings_from_exchange_fp = results_filepath + 'settings_from_exchange.json'
    if os.path.exists(settings_from_exchange_fp):
        settings_from_exchange = json.load(open(settings_from_exchange_fp))
    else:
        tmp_live_settings = load_settings(exchange, do_print=False)
        tmp_live_settings['symbol'] = backtesting_settings['symbol']
        settings_from_exchange = {}
        if exchange == 'binance':
            bot = await create_bot_binance(user, tmp_live_settings)
            settings_from_exchange['inverse'] = False
            settings_from_exchange['maker_fee'] = 0.00018
            settings_from_exchange['taker_fee'] = 0.00036
            settings_from_exchange['exchange'] = 'binance'
        elif exchange == 'bybit':
            bot = await create_bot_bybit(user, tmp_live_settings)
            settings_from_exchange['inverse'] = True
            settings_from_exchange['maker_fee'] = -0.00025
            settings_from_exchange['taker_fee'] = 0.00075
            settings_from_exchange['exchange'] = 'bybit'
        settings_from_exchange['min_qty'] = bot.min_qty
        settings_from_exchange['min_notional'] = bot.min_notional
        settings_from_exchange['qty_step'] = bot.qty_step
        settings_from_exchange['price_step'] = bot.price_step
        settings_from_exchange['max_leverage'] = bot.max_leverage
        await bot.cc.close()
        json.dump(settings_from_exchange,
                  open(settings_from_exchange_fp, 'w'),
                  indent=4)
    if 'leverage' in ranges:
        ranges['leverage'][1] = min(ranges['leverage'][1],
                                    settings_from_exchange['max_leverage'])
        ranges['leverage'][0] = min(ranges['leverage'][0],
                                    ranges['leverage'][1])

    backtesting_settings = {**backtesting_settings, **settings_from_exchange}
    print(json.dumps(backtesting_settings, indent=4))
    trades_list_filepath = os.path.join(
        results_filepath, f"{n_days}_days_trades_list_cache.npy")
    if os.path.exists(trades_list_filepath):
        print('loading cached trade list', trades_list_filepath)
        trades_list = np.load(trades_list_filepath, allow_pickle=True)
    else:
        agg_trades = await load_trades(exchange, user, symbol, n_days)
        print('preparing trades...')
        trades_list = prep_trades_list(agg_trades)
        np.save(trades_list_filepath, trades_list)
    jackrabbit(trades_list, backtesting_settings, ranges, results_filepath)
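Note the exchange-specific conventions hard-coded above: Bybit futures are inverse (coin-margined) and the negative maker fee is a rebate, while Binance USDT-margined futures are linear with positive fees on both sides.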
Example 9
def jackrabbit(trades_list: list, backtesting_settings: dict, ranges: dict,
               base_filepath: str):

    if backtesting_settings['random_starting_candidate']:
        best = {
            key: calc_new_val((abs(ranges[key][1]) - abs(ranges[key][0])) / 2,
                              ranges[key], 1.0)
            for key in sorted(ranges)
        }
        print('random starting candidate:', best)
    else:
        best = sort_dict_keys({k_: backtesting_settings[k_] for k_ in ranges})

    n_days = backtesting_settings['n_days']
    results = {}
    best_gain = -9e9
    candidate = best

    ks = backtesting_settings['n_jackrabbit_iterations']
    k = backtesting_settings['starting_k']
    ms = np.array([1 / (i / 2 + 16) for i in range(ks)])
    ms = ((ms - ms.min()) / (ms.max() - ms.min()))
    trades_filepath = make_get_filepath(
        os.path.join(base_filepath, 'trades', ''))
    json.dump(backtesting_settings,
              open(base_filepath + 'backtesting_settings.json', 'w'),
              indent=4,
              sort_keys=True)

    print(backtesting_settings, '\n\n')

    while k < ks - 1:

        if candidate['min_markup'] >= candidate['max_markup']:
            candidate['min_markup'] = candidate['max_markup']

        settings_ = {**backtesting_settings, **candidate}
        key = format_dict(candidate)
        if key in results:
            print('\nskipping', key)
            candidate = get_new_candidate(ranges, best)
            continue
        print(f'\nk={k}, m={ms[k]:.4f} candidate:\n', candidate)
        start_time = time()
        trades = backtest(trades_list, settings_)
        print('\ntime elapsed', round(time() - start_time, 1), 'seconds')
        if not trades:
            print('\nno trades')
            candidate = get_new_candidate(ranges, best)
            continue
        k += 1
        tdf = pd.DataFrame(trades).set_index('trade_id')
        tdf.to_csv(trades_filepath + key + '.csv')
        closest_liq = ((tdf.price - tdf.liq_price).abs() / tdf.price).min()
        biggest_pos_size = tdf.pos_size.abs().max()
        n_closes = len(tdf[tdf.type == 'close'])
        pnl_sum = tdf.pnl.sum()
        loss_sum = tdf[tdf.type == 'stop_loss'].pnl.sum()
        abs_pos_sizes = tdf.pos_size.abs()
        gain = (pnl_sum + settings_['balance']) / settings_['balance']
        average_daily_gain = gain**(1 / n_days)
        n_trades = len(tdf)
        result = {
            'n_closes': n_closes,
            'pnl_sum': pnl_sum,
            'loss_sum': loss_sum,
            'average_daily_gain': average_daily_gain,
            'gain': gain,
            'n_trades': n_trades,
            'closest_liq': closest_liq,
            'biggest_pos_size': biggest_pos_size,
            'n_days': n_days
        }
        print('\n\n', result)
        results[key] = {**result, **candidate}

        if gain > best_gain:
            best = candidate
            best_gain = gain
            print('\n\n\n###############\nnew best', best,
                  '\naverage daily gain:', round(average_daily_gain, 5), '\n\n')
            print(settings_, '\n')
            print(results[key], '\n\n')
            default_live_settings = load_settings(settings_['exchange'],
                                                  print_=False)
            live_settings = {
                k: settings_[k] if k in settings_ else default_live_settings[k]
                for k in default_live_settings
            }
            live_settings['indicator_settings'] = {
                'ema': {
                    'span': best['ema_span']
                }
            }
            json.dump(live_settings,
                      open(base_filepath + 'best_result_live_settings.json',
                           'w'),
                      indent=4,
                      sort_keys=True)
            json.dump(results[key],
                      open(base_filepath + 'best_result.json', 'w'),
                      indent=4,
                      sort_keys=True)
        candidate = get_new_candidate(ranges, best, m=ms[k])
        pd.DataFrame(results).T.to_csv(base_filepath + 'results.csv')
Example 10
async def load_trades(exchange: str, user: str, symbol: str,
                      n_days: float) -> pd.DataFrame:
    def skip_ids(id_, ids_):
        if id_ in ids_:
            print('skipping from', id_)
            while id_ in ids_:
                id_ -= 1
            print('           to', id_)
        return id_

    cc = init_ccxt(exchange, user)
    try:
        if exchange == 'binance':
            fetch_trades_func = binance_fetch_trades
        elif exchange == 'bybit':
            fetch_trades_func = bybit_fetch_trades
        else:
            print(exchange, 'not found')
            return
        filepath = make_get_filepath(
            f'historical_data/{exchange}/agg_trades_futures/{symbol}/')
        cache_filepath = make_get_filepath(
            f'historical_data/{exchange}/agg_trades_futures/{symbol}_cache/')
        cache_filenames = [
            f for f in os.listdir(cache_filepath) if f.endswith('.csv')
        ]
        ids = set()
        if cache_filenames:
            print('loading cached trades...')
            cached_trades = pd.concat(
                [pd.read_csv(cache_filepath + f) for f in cache_filenames],
                axis=0)
            cached_trades = cached_trades.set_index('trade_id').sort_index()
            cached_trades = cached_trades[~cached_trades.index.duplicated()]
            ids.update(cached_trades.index)
        else:
            cached_trades = None
        age_limit = time() - 60 * 60 * 24 * n_days
        age_limit_millis = age_limit * 1000
        print('age_limit', ts_to_date(age_limit))
        chunk_iterator = iter_chunks(exchange, symbol)
        chunk = next(chunk_iterator)
        chunks = {} if chunk is None else {int(chunk.index[0]): chunk}
        if chunk is not None:
            ids.update(chunk.index)
        min_id = min(ids) if ids else 0
        new_trades = await fetch_trades_func(cc, symbol)
        cached_ids = set()
        k = 0
        while True:
            if new_trades[0]['timestamp'] <= age_limit_millis:
                break
            from_id = new_trades[0]['trade_id'] - 1
            while True:
                if chunk is None:
                    min_id = 0
                    break
                from_id = skip_ids(from_id, ids)
                if from_id < min_id:
                    chunk = next(chunk_iterator)
                    if chunk is None:
                        min_id = 0
                        break
                    else:
                        chunks[int(chunk.index[0])] = chunk
                        ids.update(chunk.index)
                        min_id = min(ids)
                        if chunk.timestamp.max() < age_limit_millis:
                            break
                else:
                    break
            from_id = skip_ids(from_id, ids)
            from_id -= 999
            new_trades = await fetch_trades_func(cc, symbol,
                                                 from_id=from_id) + new_trades
            k += 1
            if k % 20 == 0:
                print('dumping cache')
                cache_df = pd.DataFrame([
                    t for t in new_trades if t['trade_id'] not in cached_ids
                ]).set_index('trade_id')
                cache_df.to_csv(cache_filepath + str(int(time() * 1000)) +
                                '.csv')
                cached_ids.update(cache_df.index)
        new_trades_df = pd.DataFrame(new_trades).set_index('trade_id')
        trades_updated = pd.concat(list(chunks.values()) +
                                   [new_trades_df, cached_trades],
                                   axis=0)
        no_dup = trades_updated[~trades_updated.index.duplicated()]
        no_dup_sorted = no_dup.sort_index()
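        # split the deduplicated trades into fixed-size chunks, keyed by trade_id
        # floored to the nearest chunk_size, and re-dump any chunk that is
        # missing or incomplete on disk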
        chunk_size = 100000
        chunk_ids = no_dup_sorted.index // chunk_size * chunk_size
        for chunk_id, chunk_df in no_dup_sorted.groupby(chunk_ids):
            if chunk_id not in chunks or len(chunks[chunk_id]) != chunk_size:
                print('dumping chunk', chunk_id)
                chunk_df.to_csv(f'{filepath}{chunk_id}.csv')
        for f in [
                f_ for f_ in os.listdir(cache_filepath) if f_.endswith('.csv')
        ]:
            os.remove(cache_filepath + f)
        await cc.close()
        return no_dup_sorted[no_dup_sorted.timestamp >= age_limit_millis]
    except KeyboardInterrupt:
        await cc.close()
Example 11
def jackrabbit(exchange: str, agg_trades: pd.DataFrame):

    settings = {
        'ema_spans': [10000, 38036],
        'enter_long': True,
        'enter_shrt': True,
        'entry_amount': 0.001,
        'leverage': 108,
        'markup': 0.00143,
        'spread': 0.00001,
        'symbol': 'BTCUSDT'
    }
    '''
    best = {'ema_spans': (39256.0, 90333.0),
            'leverage': 79.0,
            'markup': 0.002025,
            'spread': 0.000239}

    best = {"ema_spans": (45876, 67689),
            "spread": -0.000149,
            "leverage": 91,
            "markup": 0.00093}
    '''

    if exchange == 'bybit':
        best = {
            'ema_spans': (20174, 61286),
            'leverage': 86.7,
            'markup': 0.0009,
            'spread': 0.0
        }
        ranges = {
            'ema_spans': (2000, 250000, 0),
            'leverage': (40, 100, 1),
            'markup': (0.0003, 0.006, 6),
            'spread': (-0.002, 0.002, 6)
        }
        price_step = 0.5
        inverse = True
        margin_cost_limit = 0.004
        maker_fee = -0.00025
        taker_fee = 0.00075
        settings['entry_amount'] = 1
    elif exchange == 'binance':
        best = {
            'ema_spans': (50000, 50000),
            'leverage': 90,
            'markup': 0.0015,
            'spread': 0.0
        }
        ranges = {
            'ema_spans': (2000, 250000, 0),
            'leverage': (40, 125, 0),
            'markup': (0.0006, 0.006, 6),
            'spread': (-0.002, 0.002, 6)
        }
        price_step = 0.01
        inverse = False
        margin_cost_limit = 160
        maker_fee = 0.00018
        taker_fee = 0.00036
        settings['entry_amount'] = 0.001
    else:
        raise Exception(f'exchange {exchange} not found')

    best = {k_: best[k_] for k_ in sorted(best)}

    results = {}
    best_gain = -99999
    candidate = best

    ks = 200
    k = 0
    ms = np.array([1 / (i * 0.1 + 1) for i in range(ks)])
    ms = (ms - ms.min()) / (ms.max() - ms.min())

    results_filename = make_get_filepath(
        f'jackrabbit_results/{exchange}/{ts_to_date(time())[:19]}')

    min_n_trades = len(agg_trades) / 5000
    print('min_n_trades', min_n_trades)
    conditions = [lambda r: r['n_trades'] > min_n_trades]

    while k < ks - 1:
        try:
            k += 1
            adf = agg_trades
            key = tuple([candidate[k_] for k_ in sorted(candidate)])
            if key in results:
                print('skipping', key)
                candidate = get_new_candidate(ranges, best)
                continue
            line = f'\n{k} m={ms[k]:.4f} best {tuple(best.values())}, '
            line += f'candidate {tuple(candidate.values())}'
            print(line)
            trades, adf = backtest(
                adf,
                {k_: candidate[k_] if k_ in candidate else settings[k_]
                 for k_ in settings},
                margin_cost_limit=margin_cost_limit,
                maker_fee=maker_fee,
                taker_fee=taker_fee,
                inverse=inverse,
                price_step=price_step)
            if not trades:
                print('\nno trades')
                candidate = get_new_candidate(ranges, best)
                continue
            tdf = pd.DataFrame(trades).set_index('trade_id')
            result = {'net_pnl': tdf.realized_pnl.sum() - tdf.fee.sum()}
            result['amount_max'] = tdf.amount.max()
            result['amount_min'] = tdf.amount.min()
            result['amount_abs_max'] = tdf.amount.abs().max()
            result['amount_abs_sum'] = tdf.amount.abs().sum()
            result['n_trades'] = len(trades)
            result['max_n_ddown'] = tdf.n_double_downs.max()
            result['mean_n_ddown'] = tdf[
                tdf.n_double_downs >= 0].n_double_downs.mean()
            result['margin_cost_max'] = tdf.margin_cost.max()
            result['n_liquidations'] = len(tdf[tdf.type == 'liquidation'])
            result['gain'] = (result['net_pnl'] + result['margin_cost_max']) / \
                result['margin_cost_max']
            results[key] = result
            print(f'\n{result}')
            with open(results_filename + '.txt', 'a') as f:
                f.write(str(key) + ' ' + str(results[key]) + '\n')
            if result['gain'] > best_gain:
                if all([condition(result) for condition in conditions]):
                    best = candidate
                    best_gain = result['gain']
                    print('\n\nnew best', best, '\n', result, '\n\n')
            candidate = get_new_candidate(ranges, best, m=ms[k])
            pd.DataFrame(results).to_csv(results_filename + '.csv')
        except KeyboardInterrupt:
            return results
    return results
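The min_n_trades condition requires a candidate to produce at least one trade per 5000 aggregated trades before it can become the new best, filtering out degenerate configurations that barely trade.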
Example 12
def jackrabbit(agg_trades: pd.DataFrame):
    '''
    # settings for binance
    settings = {
        "default_qty": 0.001,
        "grid_step": 344,
        "leverage": 125,
        "maker_fee": 0.00018,
        "margin_limit": 60,
        "markups": (0.0038,),
        "min_qty": 0.001,
        "n_close_orders": 1,
        "n_entry_orders": 7,
        "price_step": 0.01,
        "qty_step": 0.001,
        "symbol": "BTCUSDT",
        "inverse": False,
        "break_on_loss": True,
    }
    ranges = {
        'default_qty': (settings['min_qty'], settings['min_qty'] * 1, settings['qty_step']),
        'grid_step': (10, 400, 1),
        'markups': (0.0005, 0.005, 0.0001),
        'n_close_orders': (1, 1, 1),
    }
    '''
    # settings for bybit
    settings = {
        "default_qty": 1.0,
        "grid_step": 344,
        "leverage": 100,
        "maker_fee": -0.00025,
        "margin_limit": 0.001,
        "markups": (0.0038, ),
        "min_qty": 1.0,
        "n_close_orders": 1,
        "n_entry_orders": 7,
        "price_step": 0.5,
        "qty_step": 1.0,
        "symbol": "BTCUSD",
        "inverse": True,
        "break_on_loss": True,
    }
    ranges = {
        'default_qty': (1, 30, 1),
        'grid_step': (1, 400, 1),
        'margin_limit': (0.001, 0.001, 0.0001),
        'markups': (0.0001, 0.01, 0.0001),
        'n_close_orders': (1, 10, 1),
    }

    tweakable = {
        'default_qty': 0.0,
        'grid_step': 0.0,
        'markups': (0.0, 0.0),
        'n_close_orders': 0.0
    }

    best = {}

    for key in tweakable:
        if isinstance(tweakable[key], tuple):
            best[key] = tuple(
                sorted([
                    calc_new_val((ranges[key][1] - ranges[key][0]) / 2,
                                 ranges[key], 1.0) for _ in tweakable[key]
                ]))
        else:
            best[key] = calc_new_val((ranges[key][1] - ranges[key][0]) / 2,
                                     ranges[key], 1.0)

    # optional: uncomment to use settings as start candidate.
    #best = {k_: settings[k_] for k_ in sorted(ranges)}

    settings = sort_dict_keys(settings)
    best = sort_dict_keys(best)

    results = {}
    best_gain = -99999999
    candidate = best

    ks = 200
    k = 0
    ms = np.array([1 / (i / 2 + 16) for i in range(ks)])
    ms = ((ms - ms.min()) / (ms.max() - ms.min()))

    results_filename = make_get_filepath(
        f'jackrabbit_results_grid/{ts_to_date(time())[:19]}')
    if settings['inverse']:
        results_filename += '_inverse'

    n_days = (agg_trades.timestamp.iloc[-1] -
              agg_trades.timestamp.iloc[0]) / 1000 / 60 / 60 / 24
    settings['n_days'] = n_days
    print('n_days', n_days)

    # conditions for result approval
    conditions = [
        lambda r: True,
    ]

    df = prep_df(agg_trades)

    while k < ks - 1:
        try:
            k += 1
            key = tuple([candidate[k_] for k_ in sorted(candidate)])
            if key in results:
                print('skipping', key)
                candidate = get_new_candidate(ranges, best)
                continue
            line = f'\n{k} m={ms[k]:.4f} best {tuple(best.values())}, '
            line += f'candidate {tuple(candidate.values())}'
            print(line)
            settings_ = {
                k_: candidate[k_] if k_ in candidate else settings[k_]
                for k_ in sorted(settings)
            }
            trades = backtest(df, settings_)
            if not trades:
                print('\nno trades')
                candidate = get_new_candidate(ranges, best)
                continue
            tdf = pd.DataFrame(trades).set_index('trade_id')
            n_closes = len(tdf[tdf.type == 'close'])
            pnl_sum = tdf.pnl.sum()
            loss_sum = tdf[tdf.pnl < 0.0].pnl.sum()
            abs_pos_sizes = tdf.pos_size.abs()
            if settings['inverse']:
                max_margin_cost = (abs_pos_sizes / tdf.pos_price /
                                   settings_['leverage']).max()
            else:
                max_margin_cost = (abs_pos_sizes * tdf.pos_price /
                                   settings_['leverage']).max()
            gain = (pnl_sum +
                    settings_['margin_limit']) / settings_['margin_limit']
            n_trades = len(tdf)
            result = {
                'n_closes': n_closes,
                'pnl_sum': pnl_sum,
                'loss_sum': loss_sum,
                'max_margin_cost': max_margin_cost,
                'gain': gain,
                'n_trades': n_trades
            }
            print('\n', result)
            results[key] = result

            if gain > best_gain and all([c(results[key]) for c in conditions]):
                best = candidate
                best_gain = gain
                print('\n\nnew best', best, '\n', gain, '\n')
                print(settings_)
                print(results[key], '\n\n')
            candidate = get_new_candidate(ranges, best, m=ms[k])
            pd.DataFrame(results).T.to_csv(results_filename + '.csv')
        except KeyboardInterrupt:
            return results
    return results
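Both of the last two examples wrap the search loop in try/except KeyboardInterrupt, so interrupting a long run manually returns the partial results collected so far instead of discarding them.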