Example #1
def main():
    import numpy as np
    import pandas as pd
    from passivbot import load_settings
    # load_hlms, add_emas and backtest are assumed to be imported at module level.
    settings = load_settings('default')
    fee = 1 - 0.0675 * 0.01  # vip1: 0.0675% fee expressed as a post-fee multiplier (unused below)

    settings['hours_rolling_small_trade_window'] = 1.0
    settings['account_equity_pct_per_trade'] = 0.0002
    settings['exponent'] = 15


    symbols = [f'{c}/BTC' for c in settings['coins_long']]
    symbols = sorted(symbols)
    n_days = 30 * 13
    #symbols = [s for s in symbols if not any(s.startswith(c) for c in ['VET', 'IOST'])]
    print('loading ohlcvs')
    high_low_means = load_hlms(symbols, n_days, no_download=True)
    print('adding emas')
    df = add_emas(high_low_means, settings['ema_spans_minutes'])


    results = []
    # sweep account_equity_pct_per_hour over a grid; max_memory_span_days is held fixed at 120 here
    for aepph in list(np.linspace(0.00037, 0.0005, 20).round(6)):
        for mmsd in list(np.linspace(120, 120, 1).round().astype(int)):
            print('testing', aepph, mmsd)
            settings['account_equity_pct_per_hour'] = aepph
            settings['max_memory_span_days'] = mmsd

            balance_list, lentr, sentr, lexit, sexit, lexitpl, sexitpl = backtest(df, settings)
            bldf = pd.DataFrame(balance_list).set_index('timestamp')
            start_equity = bldf.acc_equity_quot.iloc[0]
            end_equity = bldf.acc_equity_quot.iloc[-1]
            n_days = (bldf.index[-1] - bldf.index[0]) / 1000 / 60 / 60 / 24
            avg_daily_gain = (end_equity / start_equity)**(1 / n_days)
            print()
            print(aepph, mmsd, 'average daily gain', round(avg_daily_gain, 8))
            print(aepph, mmsd, '    low water mark', bldf.acc_equity_quot.min())
            print(aepph, mmsd, '   high water mark', bldf.acc_equity_quot.max())
            print(aepph, mmsd, '              mean', bldf.acc_equity_quot.mean())
            print(aepph, mmsd, '               end', bldf.acc_equity_quot.iloc[-1])

            print('\n\n')

            results.append({
                'account_equity_pct_per_hour': aepph,
                'max_memory_span_days': mmsd,
                'average daily gain': avg_daily_gain,
                'low water mark': bldf.acc_equity_quot.min(),
                'high water mark': bldf.acc_equity_quot.max(),
                'mean': bldf.acc_equity_quot.mean(),
                'end': bldf.acc_equity_quot.iloc[-1],
            })
    for r in sorted(results, key=lambda x: x['mean']):
        print(r)
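
# A hedged follow-up sketch (not part of the original): `summarize` is a hypothetical
# helper that assumes main() is changed to return its `results` list; it just sorts the
# sweep output by mean account equity, mirroring the print loop at the end of main().
def summarize(results):
    import pandas as pd
    return pd.DataFrame(results).sort_values('mean', ascending=False)


if __name__ == '__main__':
    main()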
Example #2
async def main() -> None:
    bot = await create_bot(sys.argv[1], load_settings('bybit', sys.argv[1]))
    await start_bot(bot)
Example #3
async def main() -> None:
    bot = await create_bot(sys.argv[1],
                           load_settings('binance_futures', sys.argv[1]))
    await start_bot(bot)
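
# Both launchers above are normally run as scripts with the account name as the first
# CLI argument; a minimal entry-point sketch (sys, create_bot, load_settings and
# start_bot are assumed to be imported at module level in the original scripts):
if __name__ == '__main__':
    import asyncio
    asyncio.run(main())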
Example #4
def jackrabbit(trades_list: list, backtesting_settings: dict, ranges: dict,
               base_filepath: str):
    # json, os, np, pd, time, backtest, get_new_candidate, make_get_filepath and
    # load_settings are assumed to be imported at module level.
    ks = backtesting_settings['n_jackrabbit_iterations']
    k = backtesting_settings['starting_k']
    # mutation coefficients decay with k, normalized so ms[0] == 1.0 and ms[-1] == 0.0
    ms = np.array([1 / (i / 2 + 16) for i in range(ks)])
    ms = (ms - ms.min()) / (ms.max() - ms.min())

    best_filepath = base_filepath + 'best.json'

    if backtesting_settings['starting_candidate_preference'][0] == 'best' and \
            os.path.exists(best_filepath):
        best = json.load(open(best_filepath))
        candidate = get_new_candidate(ranges, best, ms[k])
        print('\ncurrent best')
        print(json.dumps(best, indent=4, sort_keys=True))
    else:
        best = {k_: backtesting_settings[k_] for k_ in ranges}
        best['gain'] = -9e9
        if 'given' in backtesting_settings['starting_candidate_preference'][:2]:
            candidate = best.copy()
            print('\nusing starting candidate from backtesting_settings')
        else:
            candidate = get_new_candidate(ranges, best, m=1.0)
            print('\nusing random starting candidate')
        print(json.dumps(candidate, indent=4, sort_keys=True))

    results = {}
    n_days = backtesting_settings['n_days']

    trades_filepath = make_get_filepath(
        os.path.join(base_filepath, 'backtest_trades', ''))
    results_filename = base_filepath + 'results.txt'
    json.dump(backtesting_settings,
              open(base_filepath + 'backtesting_settings.json', 'w'),
              indent=4,
              sort_keys=True)
    json.dump(ranges,
              open(base_filepath + 'ranges.json', 'w'),
              indent=4,
              sort_keys=True)

    while k < ks:
        mutation_coefficient = ms[k]
        # keep min_markup <= max_markup
        if candidate['min_markup'] >= candidate['max_markup']:
            candidate['min_markup'] = candidate['max_markup']

        settings = {**backtesting_settings, **candidate}
        key = np.format_float_positional(
            hash(json.dumps({k_: candidate[k_] for k_ in ranges})), trim='-')[1:20]
        if key in results:
            if os.path.exists(best_filepath):
                best = json.load(open(best_filepath))
            candidate = get_new_candidate(ranges, best, mutation_coefficient)
            continue
        print(f'\nk={k}, m={mutation_coefficient:.4f} candidate:\n', candidate)
        start_time = time()
        trades = backtest(trades_list, settings)
        print('\ntime elapsed', round(time() - start_time, 1), 'seconds')
        k += 1
        if not trades:
            print('\nno trades')
            if os.path.exists(best_filepath):
                best = json.load(open(best_filepath))
            candidate = get_new_candidate(ranges, best, mutation_coefficient)
            continue

        tdf = pd.DataFrame(trades).set_index('trade_id')
        closest_liq = ((tdf.price - tdf.liq_price).abs() / tdf.price).min()
        biggest_pos_size = tdf.pos_size.abs().max()
        n_closes = len(tdf[tdf.type == 'close'])
        pnl_sum = tdf.pnl.sum()
        loss_sum = tdf[tdf.type == 'stop_loss'].pnl.sum()
        abs_pos_sizes = tdf.pos_size.abs()
        gain = (pnl_sum + settings['starting_balance']) / settings['starting_balance']
        candidate['gain'] = gain
        average_daily_gain = gain**(1 / n_days)
        n_trades = len(tdf)
        result = {
            'n_closes': n_closes,
            'pnl_sum': pnl_sum,
            'loss_sum': loss_sum,
            'average_daily_gain': average_daily_gain,
            'gain': gain,
            'n_trades': n_trades,
            'closest_liq': closest_liq,
            'biggest_pos_size': biggest_pos_size,
            'n_days': n_days,
            'key': key
        }
        tdf.to_csv(f'{trades_filepath}{key}.csv')
        print('\n\n', result)
        results[key] = {**result, **candidate}

        if os.path.exists(best_filepath):
            best = json.load(open(best_filepath))

        if candidate['gain'] > best['gain']:
            best = candidate
            print('\n\n\n###############\nnew best', best,
                  '\naverage daily gain:', round(average_daily_gain, 5), '\n\n')
            print(settings, '\n')
            print(results[key], '\n\n')
            default_live_settings = load_settings(settings['exchange'],
                                                  do_print=False)
            live_settings = {
                k: settings[k] if k in settings else default_live_settings[k]
                for k in default_live_settings
            }
            live_settings['indicator_settings'] = {
                'tick_ema': {
                    'span': best['ema_span']
                },
                'do_long': backtesting_settings['do_long'],
                'do_shrt': backtesting_settings['do_shrt']
            }
            json.dump(live_settings,
                      open(base_filepath + 'best_result_live_settings.json',
                           'w'),
                      indent=4,
                      sort_keys=True)
            print('\n\n', json.dumps(live_settings, indent=4, sort_keys=True),
                  '\n\n')
            json.dump({'gain': result['gain'], **best},
                      open(best_filepath, 'w'),
                      indent=4,
                      sort_keys=True)
        candidate = get_new_candidate(ranges, best, m=mutation_coefficient)
        with open(results_filename, 'a') as f:
            f.write(json.dumps(results[key]) + '\n')
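

# A small illustration (not part of the original) of the mutation-coefficient schedule
# used by jackrabbit(): ms decays from 1.0 to 0.0 over the run, so early candidates
# mutate strongly and late candidates only slightly. ks=20 is chosen here purely for
# the printout.
def show_mutation_schedule(ks: int = 20) -> None:
    import numpy as np
    ms = np.array([1 / (i / 2 + 16) for i in range(ks)])
    ms = (ms - ms.min()) / (ms.max() - ms.min())
    for i, m in enumerate(ms):
        print(f'k={i:>2}  m={m:.4f}')

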
async def main():
    exchange = sys.argv[1]
    user = sys.argv[2]

    settings_filepath = os.path.join('backtesting_settings', exchange, '')
    backtesting_settings = json.load(
        open(os.path.join(settings_filepath, 'backtesting_settings.json')))

    try:
        session_name = sys.argv[3]
        print('\n\nusing given session name', session_name, '\n\n')
    except IndexError:
        session_name = backtesting_settings['session_name']
        print('\n\nusing session name from backtesting_settings.json',
              session_name, '\n\n')

    symbol = backtesting_settings['symbol']
    n_days = backtesting_settings['n_days']
    ranges = json.load(open(os.path.join(settings_filepath, 'ranges.json')))
    results_filepath = make_get_filepath(
        os.path.join('backtesting_results', exchange, symbol, session_name,
                     ''))
    print(results_filepath)

    settings_from_exchange_fp = results_filepath + 'settings_from_exchange.json'
    if os.path.exists(settings_from_exchange_fp):
        settings_from_exchange = json.load(open(settings_from_exchange_fp))
    else:
        tmp_live_settings = load_settings(exchange, do_print=False)
        tmp_live_settings['symbol'] = backtesting_settings['symbol']
        settings_from_exchange = {}
        if exchange == 'binance':
            bot = await create_bot_binance(user, tmp_live_settings)
            settings_from_exchange['inverse'] = False
            settings_from_exchange['maker_fee'] = 0.00018
            settings_from_exchange['taker_fee'] = 0.00036
            settings_from_exchange['exchange'] = 'binance'
        elif exchange == 'bybit':
            bot = await create_bot_bybit(user, tmp_live_settings)
            settings_from_exchange['inverse'] = True
            settings_from_exchange['maker_fee'] = -0.00025
            settings_from_exchange['taker_fee'] = 0.00075
            settings_from_exchange['exchange'] = 'bybit'
        else:
            raise Exception(f'unrecognized exchange: {exchange}')
        settings_from_exchange['min_qty'] = bot.min_qty
        settings_from_exchange['min_notional'] = bot.min_notional
        settings_from_exchange['qty_step'] = bot.qty_step
        settings_from_exchange['price_step'] = bot.price_step
        settings_from_exchange['max_leverage'] = bot.max_leverage
        await bot.cc.close()
        json.dump(settings_from_exchange,
                  open(settings_from_exchange_fp, 'w'),
                  indent=4)
    if 'leverage' in ranges:
        ranges['leverage'][1] = min(ranges['leverage'][1],
                                    settings_from_exchange['max_leverage'])
        ranges['leverage'][0] = min(ranges['leverage'][0],
                                    ranges['leverage'][1])

    backtesting_settings = {**backtesting_settings, **settings_from_exchange}
    print(json.dumps(backtesting_settings, indent=4))
    trades_list_filepath = os.path.join(
        results_filepath, f"{n_days}_days_trades_list_cache.npy")
    if os.path.exists(trades_list_filepath):
        print('loading cached trade list', trades_list_filepath)
        trades_list = np.load(trades_list_filepath, allow_pickle=True)
    else:
        agg_trades = await load_trades(exchange, user, symbol, n_days)
        print('preparing trades...')
        trades_list = prep_trades_list(agg_trades)
        np.save(trades_list_filepath, trades_list)
    jackrabbit(trades_list, backtesting_settings, ranges, results_filepath)
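

# A minimal entry-point sketch, assuming the script is invoked as, e.g.,
# `python backtest.py <exchange> <user> [session_name]`:
if __name__ == '__main__':
    import asyncio
    asyncio.run(main())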
Example #6
def jackrabbit(trades_list: list, backtesting_settings: dict, ranges: dict,
               base_filepath: str):

    if backtesting_settings['random_starting_candidate']:
        best = {
            key: calc_new_val((abs(ranges[key][1]) - abs(ranges[key][0])) / 2,
                              ranges[key], 1.0)
            for key in sorted(ranges)
        }
        print('random starting candidate:', best)
    else:
        best = sort_dict_keys({k_: backtesting_settings[k_] for k_ in ranges})

    n_days = backtesting_settings['n_days']
    results = {}
    best_gain = -9e9
    candidate = best

    ks = backtesting_settings['n_jackrabbit_iterations']
    k = backtesting_settings['starting_k']
    ms = np.array([1 / (i / 2 + 16) for i in range(ks)])
    ms = ((ms - ms.min()) / (ms.max() - ms.min()))
    trades_filepath = make_get_filepath(
        os.path.join(base_filepath, 'trades', ''))
    json.dump(backtesting_settings,
              open(base_filepath + 'backtesting_settings.json', 'w'),
              indent=4,
              sort_keys=True)

    print(backtesting_settings, '\n\n')

    while k < ks - 1:

        if candidate['min_markup'] >= candidate['max_markup']:
            candidate['min_markup'] = candidate['max_markup']

        settings_ = {**backtesting_settings, **candidate}
        key = format_dict(candidate)
        if key in results:
            print('\nskipping', key)
            candidate = get_new_candidate(ranges, best)
            continue
        print(f'\nk={k}, m={ms[k]:.4f} candidate:\n', candidate)
        start_time = time()
        trades = backtest(trades_list, settings_)
        print('\ntime elapsed', round(time() - start_time, 1), 'seconds')
        if not trades:
            print('\nno trades')
            candidate = get_new_candidate(ranges, best)
            continue
        k += 1
        tdf = pd.DataFrame(trades).set_index('trade_id')
        tdf.to_csv(trades_filepath + key + '.csv')
        closest_liq = ((tdf.price - tdf.liq_price).abs() / tdf.price).min()
        biggest_pos_size = tdf.pos_size.abs().max()
        n_closes = len(tdf[tdf.type == 'close'])
        pnl_sum = tdf.pnl.sum()
        loss_sum = tdf[tdf.type == 'stop_loss'].pnl.sum()
        abs_pos_sizes = tdf.pos_size.abs()
        gain = (pnl_sum + settings_['balance']) / settings_['balance']
        average_daily_gain = gain**(1 / n_days)
        n_trades = len(tdf)
        result = {
            'n_closes': n_closes,
            'pnl_sum': pnl_sum,
            'loss_sum': loss_sum,
            'average_daily_gain': average_daily_gain,
            'gain': gain,
            'n_trades': n_trades,
            'closest_liq': closest_liq,
            'biggest_pos_size': biggest_pos_size,
            'n_days': n_days
        }
        print('\n\n', result)
        results[key] = {**result, **candidate}

        if gain > best_gain:
            best = candidate
            best_gain = gain
            print('\n\n\n###############\nnew best', best,
                  '\naverage daily gain:', round(average_daily_gain, 5), '\n\n')
            print(settings_, '\n')
            print(results[key], '\n\n')
            default_live_settings = load_settings(settings_['exchange'],
                                                  print_=False)
            live_settings = {
                k: settings_[k] if k in settings_ else default_live_settings[k]
                for k in default_live_settings
            }
            live_settings['indicator_settings'] = {
                'ema': {
                    'span': best['ema_span']
                }
            }
            json.dump(live_settings,
                      open(base_filepath + 'best_result_live_settings.json',
                           'w'),
                      indent=4,
                      sort_keys=True)
            json.dump(results[key],
                      open(base_filepath + 'best_result.json', 'w'),
                      indent=4,
                      sort_keys=True)
        candidate = get_new_candidate(ranges, best, m=ms[k])
        pd.DataFrame(results).T.to_csv(base_filepath + 'results.csv')
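

# A hedged post-run sketch (not part of the original): the rows written to results.csv
# above can be reloaded and ranked by gain to review the search afterwards. Column names
# come from the `result` and `candidate` dicts merged into results[key] in jackrabbit().
def rank_results(base_filepath: str):
    import pandas as pd
    rdf = pd.read_csv(base_filepath + 'results.csv', index_col=0)
    return rdf.sort_values('gain', ascending=False)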