Example #1
0
def load_live_config(live_config_path: str) -> dict:
    try:
        live_config = json.load(open(live_config_path))
        live_config = json.loads(
            json.dumps(live_config).replace('secondary_grid_spacing',
                                            'secondary_pprice_diff'))
        assert all(k in live_config['long']
                   for k in get_template_live_config()['long'])
        return numpyize(live_config)
    except Exception as e:
        raise Exception(f'failed to load live config {live_config_path} {e}')
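
# The replace-on-serialized-JSON trick above renames a key at any nesting depth
# in one pass (here: the old 'secondary_grid_spacing' key to the newer
# 'secondary_pprice_diff').  A minimal standalone sketch of that technique:
import json

def rename_key_everywhere(cfg: dict, old: str, new: str) -> dict:
    # serialize, string-replace the key name, parse back
    return json.loads(json.dumps(cfg).replace(old, new))

print(rename_key_everywhere({"long": {"secondary_grid_spacing": 0.12}},
                            "secondary_grid_spacing", "secondary_pprice_diff"))
# {'long': {'secondary_pprice_diff': 0.12}}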
Example #2
0
def get_expanded_ranges(config: dict) -> dict:
    updated_ranges = OrderedDict()
    unpacked = unpack_config(get_template_live_config())
    for k0 in unpacked:
        if '£' in k0 or k0 in config['ranges']:
            for k1 in config['ranges']:
                if k1 in k0:
                    updated_ranges[k0] = config['ranges'][k1]
                    if 'pbr_limit' in k0:
                        updated_ranges[k0] = [
                            updated_ranges[k0][0],
                            min(updated_ranges[k0][1], config['max_leverage'])
                        ]
    return updated_ranges
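
# For intuition: the loop above pairs each flattened template key with any
# range whose shorter name is a substring of it, and caps 'pbr_limit' ranges
# at max_leverage.  A toy run of that matching with made-up keys (the real
# keys come from unpack_config and the optimize config, not shown here):
from collections import OrderedDict

unpacked_keys = ["long£pbr_limit", "long£grid_span", "shrt£pbr_limit"]
ranges = {"pbr_limit": [0.0, 10.0], "grid_span": [0.1, 0.7]}
max_leverage = 5
expanded = OrderedDict()
for k0 in unpacked_keys:
    for k1 in ranges:
        if k1 in k0:
            expanded[k0] = ranges[k1]
            if "pbr_limit" in k0:
                expanded[k0] = [expanded[k0][0], min(expanded[k0][1], max_leverage)]
print(expanded)
# -> both pbr_limit ranges capped at [0.0, 5]; grid_span left as [0.1, 0.7]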
Example #3
0
def create_config(config: dict) -> dict:
    updated_ranges = get_expanded_ranges(config)
    template = get_template_live_config()
    template['long']['enabled'] = config['do_long']
    template['shrt']['enabled'] = config['do_shrt']
    unpacked = unpack_config(template)
    for k in updated_ranges:
        side = 'long' if 'long' in k else ('shrt' if 'shrt' in k else '')
        if updated_ranges[k][0] != updated_ranges[k][1] and (
                not side or config[f'do_{side}']):
            unpacked[k] = tune.uniform(updated_ranges[k][0],
                                       updated_ranges[k][1])
        else:
            unpacked[k] = updated_ranges[k][0]
    return {**config, **unpacked, **{'ranges': updated_ranges}}
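
# The branch above turns each expanded range into either a Ray Tune search
# space (tune.uniform) when the bounds differ, or a plain constant when they
# are equal or the side is disabled.  A minimal sketch of that idea, assuming
# ray[tune] is installed; the keys here are made up for illustration:
from ray import tune

ranges = {"long£grid_span": [0.1, 0.6], "long£n_close_orders": [10, 10]}
space = {k: (tune.uniform(lo, hi) if lo != hi else lo)
         for k, (lo, hi) in ranges.items()}
print(space)  # {'long£grid_span': <tune sampler>, 'long£n_close_orders': 10}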
Example #4
0
async def main():
    parser = argparse.ArgumentParser(prog='Optimize', description='Optimize passivbot config.')
    parser = add_argparse_args(parser)
    parser.add_argument('-t', '--start', type=str, required=False, dest='starting_configs',
                        default=None,
                        help='start with given live configs.  single json file or dir with multiple json files')
    args = parser.parse_args()
    for config in await prep_config(args):
        try:
            template_live_config = get_template_live_config(config['n_spans'])
            config = {**template_live_config, **config}
            dl = Downloader(config)
            data = await dl.get_sampled_ticks()
            shm = shared_memory.SharedMemory(create=True, size=data.nbytes)
            shdata = np.ndarray(data.shape, dtype=data.dtype, buffer=shm.buf)
            shdata[:] = data
            del data
            config['n_days'] = (shdata[-1][0] - shdata[0][0]) / (1000 * 60 * 60 * 24)
            config['optimize_dirpath'] = make_get_filepath(os.path.join(config['optimize_dirpath'],
                                                                        ts_to_date(time())[:19].replace(':', ''), ''))

            print()
            for k in (keys := ['exchange', 'symbol', 'starting_balance', 'start_date', 'end_date', 'latency_simulation_ms',
                               'do_long', 'do_shrt', 'minimum_bankruptcy_distance', 'maximum_hrs_no_fills',
                               'maximum_hrs_no_fills_same_side', 'iters', 'n_particles', 'sliding_window_size',
                               'n_spans']):
                if k in config:
                    print(f"{k: <{max(map(len, keys)) + 2}} {config[k]}")
            print()

            backtest_wrap = BacktestWrap(shdata, config)
            post_processing = PostProcessing()
            if config['starting_configs']:
                starting_configs = get_starting_configs(config)
                initial_positions = [backtest_wrap.config_to_xs(cfg) for cfg in starting_configs]
            else:
                initial_positions = []
            pso_multiprocess(backtest_wrap.rf,
                             config['n_particles'],
                             backtest_wrap.bounds,
                             config['options']['c1'],
                             config['options']['c2'],
                             config['options']['w'],
                             n_cpus=config['num_cpus'],
                             iters=config['iters'],
                             initial_positions=initial_positions,
                             post_processing_func=post_processing.process)
        finally:
            # release the shared memory block created above
            del shdata
            shm.close()
            shm.unlink()
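
# The SharedMemory block above lets worker processes map the tick array
# without pickling it.  A minimal standalone sketch of that pattern, using
# random data just to show the mechanics (not the project's tick format):
import numpy as np
from multiprocessing import shared_memory

data = np.random.random((1000, 3))
shm = shared_memory.SharedMemory(create=True, size=data.nbytes)
shdata = np.ndarray(data.shape, dtype=data.dtype, buffer=shm.buf)
shdata[:] = data              # copy into the shared buffer
del data                      # workers would attach via shm.name + shape/dtype
# ... run workers ...
del shdata                    # drop the exported buffer before closing
shm.close()
shm.unlink()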
Example #5
0
async def main():
    logging.basicConfig(
        format="%(asctime)s %(levelname)-8s %(message)s",
        level=logging.INFO,
        datefmt="%Y-%m-%dT%H:%M:%S",
    )
    parser = argparse.ArgumentParser(
        prog="auto profit transfer",
        description=
        "automatically transfer percentage of profits from futures wallet to spot wallet",
    )
    parser.add_argument("user",
                        type=str,
                        help="user/account_name defined in api-keys.json")
    parser.add_argument(
        "-p",
        "--percentage",
        type=float,
        required=False,
        default=0.5,
        dest="percentage",
        help="per uno, i.e. 0.02==2%.  default=0.5",
    )
    args = parser.parse_args()
    config = get_template_live_config()
    config["user"] = args.user
    config["symbol"] = "BTCUSDT"  # dummy symbol
    config["market_type"] = "futures"
    bot = await create_binance_bot(config)
    transfer_log_fpath = make_get_filepath(
        os.path.join("logs",
                     f"automatic_profit_transfer_log_{config['user']}.json"))
    try:
        already_transferred_ids = set(json.load(open(transfer_log_fpath)))
        logging.info(f"loaded already transferred IDs: {transfer_log_fpath}")
    except Exception:
        already_transferred_ids = set()
        logging.info("no previous transfers to load")
    while True:
        now = (await bot.public_get(bot.endpoints["time"]))["serverTime"]
        try:
            income = await bot.get_all_income(start_time=now -
                                              1000 * 60 * 60 * 24)
        except Exception as e:
            logging.error(f"failed fetching income {e}")
            traceback.print_exc()
            income = []
        income = [
            e for e in income
            if e["transaction_id"] not in already_transferred_ids
        ]
        profit = sum([e["income"] for e in income])
        to_transfer = round_dynamic(profit * args.percentage, 4)
        if to_transfer > 0:
            try:
                transferred = await bot.private_post(
                    bot.endpoints["futures_transfer"],
                    {
                        "asset": "USDT",
                        "amount": to_transfer,
                        "type": 2
                    },
                    base_endpoint=bot.spot_base_endpoint,
                )
                logging.info(
                    f"income: {profit} transferred {to_transfer} USDT")
                already_transferred_ids.update(
                    [e["transaction_id"] for e in income])
                json.dump(list(already_transferred_ids),
                          open(transfer_log_fpath, "w"))
            except Exception as e:
                logging.error(f"failed transferring {e}")
                traceback.print_exc()
        else:
            logging.info("nothing to transfer")
        await asyncio.sleep(60 * 60)  # wait an hour between checks without blocking the event loop
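
# The transfer log above is a JSON list of transaction IDs used as a "seen"
# set, so restarts never transfer the same income twice.  A minimal standalone
# sketch of that persistence pattern (the file name here is made up):
import json, os

log_path = "example_transfer_log.json"
seen = set(json.load(open(log_path))) if os.path.exists(log_path) else set()
new_ids = {101, 102, 103} - seen       # only IDs not handled yet
if new_ids:
    # ... perform the transfer for the new income entries ...
    seen.update(new_ids)
    json.dump(list(seen), open(log_path, "w"))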
Example #6
0
async def main():
    logging.basicConfig(format="", level=os.environ.get("LOGLEVEL", "INFO"))

    parser = argparse.ArgumentParser(
        prog="Optimize multi symbol",
        description="Optimize passivbot config multi symbol")
    parser.add_argument(
        "-o",
        "--optimize_config",
        type=str,
        required=False,
        dest="optimize_config_path",
        default="configs/optimize/harmony_search.hjson",
        help="optimize config hjson file",
    )
    parser.add_argument(
        "-t",
        "--start",
        type=str,
        required=False,
        dest="starting_configs",
        default=None,
        help=
        "start with given live configs.  single json file or dir with multiple json files",
    )
    parser.add_argument("-i",
                        "--iters",
                        type=int,
                        required=False,
                        dest="iters",
                        default=None,
                        help="n optimize iters")
    parser.add_argument("-c",
                        "--n_cpus",
                        type=int,
                        required=False,
                        dest="n_cpus",
                        default=None,
                        help="n cpus")
    parser.add_argument(
        "-le",
        "--long",
        type=str,
        required=False,
        dest="long_enabled",
        default=None,
        help="long enabled: [y/n]",
    )
    parser.add_argument(
        "-se",
        "--short",
        type=str,
        required=False,
        dest="short_enabled",
        default=None,
        help="short enabled: [y/n]",
    )
    parser.add_argument(
        "-pm",
        "--passivbot_mode",
        "--passivbot-mode",
        type=str,
        required=False,
        dest="passivbot_mode",
        default=None,
        help="passivbot mode options: [s/static_grid, r/recursive_grid]",
    )
    parser.add_argument(
        "-sf",
        "--score_formula",
        "--score-formula",
        type=str,
        required=False,
        dest="score_formula",
        default=None,
        help=
        "passivbot score formula options: [adg_PAD_mean, adg_PAD_std, adg_DGstd_ratio, adg_mean, adg_min, adg_PAD_std_min]",
    )
    parser.add_argument(
        "-oh",
        "--ohlcv",
        help="use 1m ohlcv instead of 1s ticks",
        action="store_true",
    )
    parser = add_argparse_args(parser)
    args = parser.parse_args()
    args.symbol = "BTCUSDT"  # dummy symbol
    config = await prepare_optimize_config(args)
    if args.score_formula is not None:
        if args.score_formula not in [
                "adg_PAD_mean",
                "adg_PAD_std",
                "adg_DGstd_ratio",
                "adg_mean",
                "adg_min",
                "adg_PAD_std_min",
        ]:
            logging.error(f"unknown score formula {args.score_formula}")
            logging.error(f"using score formula {config['score_formula']}")
        else:
            config["score_formula"] = args.score_formula
    if args.passivbot_mode is not None:
        if args.passivbot_mode in ["s", "static_grid", "static"]:
            config["passivbot_mode"] = "static_grid"
        elif args.passivbot_mode in ["r", "recursive_grid", "recursive"]:
            config["passivbot_mode"] = "recursive_grid"
        else:
            raise Exception(f"unknown passivbot mode {args.passivbot_mode}")
    passivbot_mode = config["passivbot_mode"]
    assert passivbot_mode in [
        "recursive_grid",
        "static_grid",
    ], f"unknown passivbot mode {passivbot_mode}"
    config.update(get_template_live_config(passivbot_mode))
    config["exchange"], _, _ = load_exchange_key_secret(config["user"])
    args = parser.parse_args()
    if args.long_enabled is None:
        config["long"]["enabled"] = config["do_long"]
    else:
        if "y" in args.long_enabled.lower():
            config["long"]["enabled"] = config["do_long"] = True
        elif "n" in args.long_enabled.lower():
            config["long"]["enabled"] = config["do_long"] = False
        else:
            raise Exception("please specify y/n with kwarg -le/--long")
    if args.short_enabled is None:
        config["short"]["enabled"] = config["do_short"]
    else:
        if "y" in args.short_enabled.lower():
            config["short"]["enabled"] = config["do_short"] = True
        elif "n" in args.short_enabled.lower():
            config["short"]["enabled"] = config["do_short"] = False
        else:
            raise Exception("please specify y/n with kwarg -le/--short")
    if args.symbol is not None:
        config["symbols"] = args.symbol.split(",")
    if args.n_cpus is not None:
        config["n_cpus"] = args.n_cpus
    config["ohlcv"] = args.ohlcv
    print()
    lines = [(k, getattr(args, k)) for k in args.__dict__
             if args.__dict__[k] is not None]
    for line in lines:
        logging.info(
            f"{line[0]: <{max([len(x[0]) for x in lines]) + 2}} {line[1]}")
    print()

    # download ticks .npy file if missing
    if config["ohlcv"]:
        cache_fname = f"{config['start_date']}_{config['end_date']}_ohlcv_cache.npy"
    else:
        cache_fname = f"{config['start_date']}_{config['end_date']}_ticks_cache.npy"
    exchange_name = config["exchange"] + ("_spot" if config["market_type"]
                                          == "spot" else "")
    config["symbols"] = sorted(config["symbols"])
    for symbol in config["symbols"]:
        cache_dirpath = f"backtests/{exchange_name}/{symbol}/caches/"
        if not os.path.exists(cache_dirpath + cache_fname) or not os.path.exists(
                cache_dirpath + "market_specific_settings.json"):
            logging.info(f"fetching data {symbol}")
            args.symbol = symbol
            if config["ohlcv"]:
                data = load_hlc_cache(
                    symbol,
                    config["start_date"],
                    config["end_date"],
                    base_dir=config["base_dir"],
                    spot=config["spot"],
                )
            else:
                tmp_cfg = await prepare_backtest_config(args)
                downloader = Downloader({**config, **tmp_cfg})
                await downloader.get_sampled_ticks()

    # prepare starting configs
    cfgs = []
    if args.starting_configs is not None:
        logging.info("preparing starting configs...")
        if os.path.isdir(args.starting_configs):
            for fname in os.listdir(args.starting_configs):
                try:
                    cfg = load_live_config(
                        os.path.join(args.starting_configs, fname))
                    assert determine_passivbot_mode(
                        cfg) == passivbot_mode, "wrong passivbot mode"
                    cfgs.append(cfg)
                except Exception as e:
                    logging.error(f"error loading config {fname}: {e}")
        elif os.path.exists(args.starting_configs):
            hm_load_failed = True
            if "hm_" in args.starting_configs:
                try:
                    hm = json.load(open(args.starting_configs))
                    for k in hm:
                        cfg = {
                            "long": hm[k]["long"]["config"],
                            "short": hm[k]["short"]["config"]
                        }
                        assert (determine_passivbot_mode(cfg) == passivbot_mode
                                ), "wrong passivbot mode in harmony memory"
                        cfgs.append(cfg)
                    logging.info(
                        f"loaded harmony memory {args.starting_configs}")
                    hm_load_failed = False
                except Exception as e:
                    logging.error(
                        f"error loading harmony memory {args.starting_configs}: {e}"
                    )
            if hm_load_failed:
                try:
                    cfg = load_live_config(args.starting_configs)
                    assert determine_passivbot_mode(
                        cfg) == passivbot_mode, "wrong passivbot mode"
                    cfgs.append(cfg)
                except Exception as e:
                    logging.error(
                        f"error loading config {args.starting_configs}: {e}")
    config["starting_configs"] = cfgs
    harmony_search = HarmonySearch(config)
    harmony_search.run()
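
# The -pm flag above accepts several aliases and normalizes them to the two
# canonical mode names before the assert.  A minimal sketch of that
# normalization (aliases copied from the code above):
def normalize_passivbot_mode(mode: str) -> str:
    if mode in ("s", "static_grid", "static"):
        return "static_grid"
    if mode in ("r", "recursive_grid", "recursive"):
        return "recursive_grid"
    raise ValueError(f"unknown passivbot mode {mode}")

print(normalize_passivbot_mode("r"))  # recursive_grid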
Example #7
0
    def start_new_harmony(self, wi: int):
        self.iter_counter += 1  # up iter counter on each new config started
        template = get_template_live_config(self.config["passivbot_mode"])
        new_harmony = {
            **{
                "long": deepcopy(template["long"]),
                "short": deepcopy(template["short"]),
            },
            **{
                k: self.config[k]
                for k in [
                    "starting_balance", "latency_simulation_ms", "market_type"
                ]
            },
            **{
                "symbol": self.symbols[0],
                "config_no": self.iter_counter
            },
        }
        new_harmony["long"]["enabled"] = self.do_long
        new_harmony["short"]["enabled"] = self.do_short
        for key in self.long_bounds:
            if np.random.random() < self.hm_considering_rate:
                # take note randomly from harmony memory
                new_note_long = self.hm[np.random.choice(list(
                    self.hm))]["long"]["config"][key]
                new_note_short = self.hm[np.random.choice(list(
                    self.hm))]["short"]["config"][key]
                if np.random.random() < self.pitch_adjusting_rate:
                    # tweak note
                    new_note_long = new_note_long + self.bandwidth * (
                        np.random.random() -
                        0.5) * abs(self.long_bounds[key][0] -
                                   self.long_bounds[key][1])
                    new_note_short = new_note_short + self.bandwidth * (
                        np.random.random() -
                        0.5) * abs(self.short_bounds[key][0] -
                                   self.short_bounds[key][1])
                # ensure note is within bounds
                new_note_long = max(
                    self.long_bounds[key][0],
                    min(self.long_bounds[key][1], new_note_long))
                new_note_short = max(
                    self.short_bounds[key][0],
                    min(self.short_bounds[key][1], new_note_short))
            else:
                # new random note
                new_note_long = np.random.uniform(self.long_bounds[key][0],
                                                  self.long_bounds[key][1])
                new_note_short = np.random.uniform(self.short_bounds[key][0],
                                                   self.short_bounds[key][1])
            new_harmony["long"][key] = new_note_long
            new_harmony["short"][key] = new_note_short
        logging.debug(
            f"starting new harmony {new_harmony['config_no']} - long " +
            " ".join([
                str(round_dynamic(e[1], 3))
                for e in sorted(new_harmony["long"].items())
            ]) + " - short: " + " ".join([
                str(round_dynamic(e[1], 3))
                for e in sorted(new_harmony["short"].items())
            ]))

        new_harmony[
            "market_specific_settings"] = self.market_specific_settings[
                new_harmony["symbol"]]
        new_harmony[
            "ticks_cache_fname"] = f"{self.bt_dir}/{new_harmony['symbol']}/{self.ticks_cache_fname}"
        new_harmony["passivbot_mode"] = self.config["passivbot_mode"]
        self.workers[wi] = {
            "config": deepcopy(new_harmony),
            "task": self.pool.apply_async(
                backtest_wrap, args=(deepcopy(new_harmony), self.ticks_caches)
            ),
            "id_key": new_harmony["config_no"],
        }
        self.unfinished_evals[new_harmony["config_no"]] = {
            "config": deepcopy(new_harmony),
            "single_results": {},
            "in_progress": set([self.symbols[0]]),
        }
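
# The pitch adjustment above nudges a note taken from harmony memory by at
# most half of bandwidth * |bound width| and then clamps it back into bounds.
# A minimal standalone sketch of that single step (bounds and bandwidth are
# example values, not the project's defaults):
import numpy as np

def pitch_adjust(note: float, lo: float, hi: float, bandwidth: float) -> float:
    note = note + bandwidth * (np.random.random() - 0.5) * abs(hi - lo)
    return max(lo, min(hi, note))   # keep the tweaked note within bounds

print(pitch_adjust(0.3, 0.0, 1.0, bandwidth=0.1))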
Example #8
0
async def main():
    parser = argparse.ArgumentParser(prog='Optimize multi symbol', description='Optimize passivbot config multi symbol')
    parser.add_argument('-o', '--optimize_config', type=str, required=False, dest='optimize_config_path',
                        default='configs/optimize/multi_symbol.hjson', help='optimize config hjson file')
    parser.add_argument('-t', '--start', type=str, required=False, dest='starting_configs',
                        default=None,
                        help='start with given live configs.  single json file or dir with multiple json files')
    parser.add_argument('-i', '--iters', type=int, required=False, dest='iters', default=None, help='n optimize iters')
    parser = add_argparse_args(parser)
    args = parser.parse_args()
    args.symbol = 'BTCUSDT' # dummy symbol
    config = await prepare_optimize_config(args)
    config.update(get_template_live_config())
    config['exchange'], _, _ = load_exchange_key_secret(config['user'])
    config['long']['enabled'] = config['do_long']
    config['shrt']['enabled'] = config['do_shrt']
    if config['long']['enabled']:
        if config['shrt']['enabled']:
            print('optimizing both long and short')
            config['side'] = 'both'
        else:
            print('optimizing long')
            config['side'] = 'long'
    elif config['shrt']['enabled']:
        print('optimizing short')
        config['side'] = 'shrt'
    else:
        raise Exception('long, shrt or both must be enabled')

    # download ticks .npy file if missing
    cache_fname = f"{config['start_date']}_{config['end_date']}_ticks_cache.npy"
    exchange_name = config['exchange'] + ('_spot' if config['market_type'] == 'spot' else '')
    for symbol in sorted(config['symbols']):
        cache_dirpath = f"backtests/{exchange_name}/{symbol}/caches/"
        if not os.path.exists(cache_dirpath + cache_fname) or not os.path.exists(cache_dirpath + 'market_specific_settings.json'):
            print(f'fetching data {symbol}')
            args.symbol = symbol
            tmp_cfg = await prepare_backtest_config(args)
            downloader = Downloader({**config, **tmp_cfg})
            await downloader.get_sampled_ticks()

    pool = Pool(processes=config['n_cpus'])

    func_wrap = FuncWrap(pool, config)
    cfgs = []
    if args.starting_configs is not None:
        if os.path.isdir(args.starting_configs):
            cfgs = []
            for fname in os.listdir(args.starting_configs):
                try:
                    cfgs.append(load_live_config(os.path.join(args.starting_configs, fname)))
                except Exception as e:
                    print('error loading config:', e)
        elif os.path.exists(args.starting_configs):
            try:
                cfgs = [load_live_config(args.starting_configs)]
            except Exception as e:
                print('error loading config:', e)
    starting_xs = [func_wrap.config_to_xs(cfg) for cfg in cfgs]

    n_harmonies = config['n_harmonies']
    hm_considering_rate = config['hm_considering_rate']
    bandwidth = config['bandwidth']
    pitch_adjusting_rate = config['pitch_adjusting_rate']
    iters = config['iters']
    best_harmony = harmony_search(func_wrap.func, func_wrap.bounds, n_harmonies,
                                  hm_considering_rate, bandwidth, pitch_adjusting_rate, iters,
                                  starting_xs=starting_xs,
                                  post_processing_func=func_wrap.post_processing_func)
    best_conf = func_wrap.xs_to_config(best_harmony)
    print('best conf')
    print(best_conf)
    return