def start_new_initial_eval(self, wi: int, hm_key: str):
    """Dispatch the initial backtest evaluation of harmony-memory entry
    `hm_key` to worker slot `wi`, and mark that entry's scores as pending."""
    # every dispatched config gets a unique, monotonically increasing number
    self.iter_counter += 1
    config = {
        "long": deepcopy(self.hm[hm_key]["long"]["config"]),
        "short": deepcopy(self.hm[hm_key]["short"]["config"]),
    }
    for k in ["starting_balance", "latency_simulation_ms", "market_type"]:
        config[k] = self.config[k]
    config["symbol"] = self.symbols[0]
    config["initial_eval_key"] = hm_key
    config["config_no"] = self.iter_counter

    def side_summary(side):
        # abbreviate each param name to its first two + last two chars,
        # value rounded to 3 significant digits
        items = sorted(self.hm[hm_key][side]["config"].items())
        return " ".join(f"{k[:2]}{k[-2:]}" + str(round_dynamic(v, 3)) for k, v in items)

    line = f"starting new initial eval {config['config_no']} of {self.n_harmonies} "
    if self.do_long:
        line += " - long: " + side_summary("long")
    if self.do_short:
        line += " - short: " + side_summary("short")
    logging.info(line)
    config["market_specific_settings"] = self.market_specific_settings[config["symbol"]]
    config["ticks_cache_fname"] = f"{self.bt_dir}/{config['symbol']}/{self.ticks_cache_fname}"
    config["passivbot_mode"] = self.config["passivbot_mode"]
    self.workers[wi] = {
        "config": deepcopy(config),
        "task": self.pool.apply_async(backtest_wrap, args=(deepcopy(config), self.ticks_caches)),
        "id_key": config["config_no"],
    }
    self.unfinished_evals[config["config_no"]] = {
        "config": deepcopy(config),
        "single_results": {},
        "in_progress": {self.symbols[0]},
    }
    # scores stay "in_progress" until post_process fills in real values
    self.hm[hm_key]["long"]["score"] = "in_progress"
    self.hm[hm_key]["short"]["score"] = "in_progress"
def compress_float(n: float, d: int) -> str:
    """Format `n` as a compact string: values at or above 10**d are rounded to
    whole numbers, smaller values to `d` significant digits; a redundant
    leading zero (as in "0.5" / "-0.5") or trailing ".0" is stripped."""
    rounded = round(n) if n / 10**d >= 1 else round_dynamic(n, d)
    s = format_float(rounded)
    if s.startswith("0."):
        return s[1:]
    if s.startswith("-0."):
        return "-" + s[2:]
    if s.endswith(".0"):
        return s[:-2]
    return s
def round_values(xs, n: int):
    """Recursively round every float in `xs` to `n` significant digits.

    Float scalars (including any numpy floating dtype, not just np.float64)
    are rounded via round_dynamic; dicts, OrderedDicts, lists, tuples and
    ndarrays are rebuilt with their elements rounded recursively. Anything
    else (ints, strings, None, ...) is returned unchanged.
    """
    # isinstance instead of `type(...) ==`: also catches np.float32 and
    # other numpy floating scalars, which the original float64-only check missed
    if isinstance(xs, (float, np.floating)):
        return round_dynamic(xs, n)
    # exact type checks (`is`) below preserve the original behavior of
    # leaving unrecognized subclasses (e.g. namedtuples, defaultdicts) untouched;
    # OrderedDict is checked before dict since it is a dict subclass
    if type(xs) is OrderedDict:
        return OrderedDict((k, round_values(v, n)) for k, v in xs.items())
    if type(xs) is dict:
        return {k: round_values(v, n) for k, v in xs.items()}
    if type(xs) is list:
        return [round_values(x, n) for x in xs]
    if type(xs) is np.ndarray:
        return numpyize([round_values(x, n) for x in xs])
    if type(xs) is tuple:
        return tuple(round_values(x, n) for x in xs)
    return xs
async def main():
    """Periodically (hourly) transfer a percentage of the last 24h futures
    profits from the futures wallet to the spot wallet, remembering already
    transferred income transaction IDs in a JSON log file."""
    import asyncio  # local import: only needed for the non-blocking sleep below

    logging.basicConfig(
        format="%(asctime)s %(levelname)-8s %(message)s",
        level=logging.INFO,
        datefmt="%Y-%m-%dT%H:%M:%S",
    )
    parser = argparse.ArgumentParser(
        prog="auto profit transfer",
        description="automatically transfer percentage of profits from futures wallet to spot wallet",
    )
    parser.add_argument("user", type=str, help="user/account_name defined in api-keys.json")
    parser.add_argument(
        "-p",
        "--percentage",
        type=float,
        required=False,
        default=0.5,
        dest="percentage",
        # argparse %-formats help strings, so a literal percent sign must be
        # escaped as %% (a bare "2%." made --help raise ValueError)
        help="per uno, i.e. 0.02==2%%. default=0.5",
    )
    args = parser.parse_args()
    config = get_template_live_config()
    config["user"] = args.user
    config["symbol"] = "BTCUSDT"  # dummy symbol
    config["market_type"] = "futures"
    bot = await create_binance_bot(config)
    transfer_log_fpath = make_get_filepath(
        os.path.join("logs", f"automatic_profit_transfer_log_{config['user']}.json")
    )
    try:
        # context manager instead of bare open(): no leaked file handle
        with open(transfer_log_fpath) as f:
            already_transferred_ids = set(json.load(f))
        logging.info(f"loaded already transferred IDs: {transfer_log_fpath}")
    except (OSError, ValueError):
        # missing or unparsable log file: start fresh
        # (json.JSONDecodeError is a ValueError subclass)
        already_transferred_ids = set()
        logging.info("no previous transfers to load")
    while True:
        now = (await bot.public_get(bot.endpoints["time"]))["serverTime"]
        try:
            # income of the last 24 hours
            income = await bot.get_all_income(start_time=now - 1000 * 60 * 60 * 24)
        except Exception as e:
            logging.error(f"failed fetching income {e}")
            traceback.print_exc()
            income = []
        # skip income entries that were already transferred in a previous cycle
        income = [e for e in income if e["transaction_id"] not in already_transferred_ids]
        profit = sum([e["income"] for e in income])
        to_transfer = round_dynamic(profit * args.percentage, 4)
        if to_transfer > 0:
            try:
                # type 2 == futures -> spot (Binance transfer endpoint)
                # TODO confirm against exchange API docs
                await bot.private_post(
                    bot.endpoints["futures_transfer"],
                    {"asset": "USDT", "amount": to_transfer, "type": 2},
                    base_endpoint=bot.spot_base_endpoint,
                )
                logging.info(f"income: {profit} transferred {to_transfer} USDT")
                already_transferred_ids.update([e["transaction_id"] for e in income])
                with open(transfer_log_fpath, "w") as f:
                    json.dump(list(already_transferred_ids), f)
            except Exception as e:
                logging.error(f"failed transferring {e}")
                traceback.print_exc()
        else:
            logging.info("nothing to transfer")
        # non-blocking sleep: the original time.sleep() blocked the event loop,
        # starving any background tasks the bot may run
        await asyncio.sleep(60 * 60)
def start_new_harmony(self, wi: int):
    """Compose a new candidate config ("harmony") from harmony memory and
    dispatch it to worker slot `wi` for backtesting.

    For each tunable parameter key: with probability hm_considering_rate the
    note is copied from a randomly chosen harmony in memory (long and short
    drawn from independently chosen harmonies), and with probability
    pitch_adjusting_rate additionally perturbed by up to +/- bandwidth/2 of
    the bound range and clamped to bounds; otherwise the note is sampled
    uniformly from the bounds.

    NOTE: a single random draw gates BOTH the long and short notes into the
    same branch per key, so restructuring this loop would change the RNG
    call sequence and hence reproducibility.
    """
    self.iter_counter += 1  # up iter counter on each new config started
    template = get_template_live_config(self.config["passivbot_mode"])
    # base config: template long/short sections + global backtest settings
    new_harmony = {
        **{
            "long": deepcopy(template["long"]),
            "short": deepcopy(template["short"]),
        },
        **{k: self.config[k] for k in ["starting_balance", "latency_simulation_ms", "market_type"]},
        **{"symbol": self.symbols[0], "config_no": self.iter_counter},
    }
    new_harmony["long"]["enabled"] = self.do_long
    new_harmony["short"]["enabled"] = self.do_short
    for key in self.long_bounds:
        if np.random.random() < self.hm_considering_rate:
            # take note randomly from harmony memory
            new_note_long = self.hm[np.random.choice(list(self.hm))]["long"]["config"][key]
            new_note_short = self.hm[np.random.choice(list(self.hm))]["short"]["config"][key]
            if np.random.random() < self.pitch_adjusting_rate:
                # tweak note: shift by up to half the bandwidth-scaled bound range
                new_note_long = new_note_long + self.bandwidth * (np.random.random() - 0.5) * abs(
                    self.long_bounds[key][0] - self.long_bounds[key][1]
                )
                new_note_short = new_note_short + self.bandwidth * (np.random.random() - 0.5) * abs(
                    self.short_bounds[key][0] - self.short_bounds[key][1]
                )
                # ensure note is within bounds (only needed after tweaking;
                # untweaked notes come from memory and are assumed in-bounds)
                new_note_long = max(
                    self.long_bounds[key][0], min(self.long_bounds[key][1], new_note_long)
                )
                new_note_short = max(
                    self.short_bounds[key][0], min(self.short_bounds[key][1], new_note_short)
                )
        else:
            # new random note, uniform within bounds
            new_note_long = np.random.uniform(self.long_bounds[key][0], self.long_bounds[key][1])
            new_note_short = np.random.uniform(self.short_bounds[key][0], self.short_bounds[key][1])
        new_harmony["long"][key] = new_note_long
        new_harmony["short"][key] = new_note_short
    logging.debug(
        f"starting new harmony {new_harmony['config_no']} - long "
        + " ".join(
            [str(round_dynamic(e[1], 3)) for e in sorted(new_harmony["long"].items())]
        )
        + " - short: "
        + " ".join(
            [str(round_dynamic(e[1], 3)) for e in sorted(new_harmony["short"].items())]
        )
    )
    new_harmony["market_specific_settings"] = self.market_specific_settings[new_harmony["symbol"]]
    new_harmony["ticks_cache_fname"] = f"{self.bt_dir}/{new_harmony['symbol']}/{self.ticks_cache_fname}"
    new_harmony["passivbot_mode"] = self.config["passivbot_mode"]
    # hand the config to the worker pool; id_key links the async task back to
    # its unfinished_evals entry in post_process
    self.workers[wi] = {
        "config": deepcopy(new_harmony),
        "task": self.pool.apply_async(backtest_wrap, args=(deepcopy(new_harmony), self.ticks_caches)),
        "id_key": new_harmony["config_no"],
    }
    self.unfinished_evals[new_harmony["config_no"]] = {
        "config": deepcopy(new_harmony),
        "single_results": {},
        "in_progress": set([self.symbols[0]]),
    }
def post_process(self, wi: int):
    """Handle completion of the backtest job running in worker slot `wi`.

    Stores the worker's single-symbol result; once results for all of
    self.symbols are in for this config, aggregates long/short metrics,
    computes scores per config["score_formula"] (scores are negated, so
    lower/more negative is better), updates harmony memory, and persists
    results and best configs to disk. Finally frees the worker slot.
    """
    # a worker has finished a job; process it
    cfg = deepcopy(self.workers[wi]["config"])
    id_key = self.workers[wi]["id_key"]
    symbol = cfg["symbol"]
    self.unfinished_evals[id_key]["single_results"][symbol] = self.workers[wi]["task"].get()
    self.unfinished_evals[id_key]["in_progress"].remove(symbol)
    results = deepcopy(self.unfinished_evals[id_key]["single_results"])
    if set(results) == set(self.symbols):
        # completed multisymbol iter
        adgs_long = [v["adg_long"] for v in results.values()]
        adg_mean_long = np.mean(adgs_long)
        # "_raw" means: unclipped average; the non-raw PAD values are floored
        # at the configured maximum per symbol before averaging, so configs
        # already below the threshold gain no extra score advantage
        PAD_std_long_raw = np.mean([v["pa_distance_std_long"] for v in results.values()])
        PAD_std_long = np.mean(
            [
                max(self.config["maximum_pa_distance_std_long"], v["pa_distance_std_long"])
                for v in results.values()
            ]
        )
        PAD_mean_long_raw = np.mean([v["pa_distance_mean_long"] for v in results.values()])
        PAD_mean_long = np.mean(
            [
                max(self.config["maximum_pa_distance_mean_long"], v["pa_distance_mean_long"])
                for v in results.values()
            ]
        )
        adg_DGstd_ratios_long = [v["adg_DGstd_ratio_long"] for v in results.values()]
        adg_DGstd_ratios_long_mean = np.mean(adg_DGstd_ratios_long)
        adgs_short = [v["adg_short"] for v in results.values()]
        adg_mean_short = np.mean(adgs_short)
        PAD_std_short_raw = np.mean([v["pa_distance_std_short"] for v in results.values()])
        PAD_std_short = np.mean(
            [
                max(self.config["maximum_pa_distance_std_short"], v["pa_distance_std_short"])
                for v in results.values()
            ]
        )
        PAD_mean_short_raw = np.mean([v["pa_distance_mean_short"] for v in results.values()])
        PAD_mean_short = np.mean(
            [
                max(self.config["maximum_pa_distance_mean_short"], v["pa_distance_mean_short"])
                for v in results.values()
            ]
        )
        adg_DGstd_ratios_short = [v["adg_DGstd_ratio_short"] for v in results.values()]
        adg_DGstd_ratios_short_mean = np.mean(adg_DGstd_ratios_short)
        # score formulas: all negate so that a lower score is better
        if self.config["score_formula"] == "adg_PAD_mean":
            # adg penalized when mean PA distance exceeds its allowed maximum
            score_long = -adg_mean_long * min(
                1.0, self.config["maximum_pa_distance_mean_long"] / PAD_mean_long
            )
            score_short = -adg_mean_short * min(
                1.0, self.config["maximum_pa_distance_mean_short"] / PAD_mean_short
            )
        elif self.config["score_formula"] == "adg_PAD_std":
            score_long = -adg_mean_long / max(
                self.config["maximum_pa_distance_std_long"], PAD_std_long
            )
            score_short = -adg_mean_short / max(
                self.config["maximum_pa_distance_std_short"], PAD_std_short
            )
        elif self.config["score_formula"] == "adg_DGstd_ratio":
            score_long = -adg_DGstd_ratios_long_mean
            score_short = -adg_DGstd_ratios_short_mean
        elif self.config["score_formula"] == "adg_mean":
            score_long = -adg_mean_long
            score_short = -adg_mean_short
        elif self.config["score_formula"] == "adg_min":
            # score by the worst-performing symbol
            score_long = -min(adgs_long)
            score_short = -min(adgs_short)
        elif self.config["score_formula"] == "adg_PAD_std_min":
            # best worst score
            scores_long = [
                v["adg_long"]
                / max(v["pa_distance_std_long"], self.config["maximum_pa_distance_std_long"])
                for v in results.values()
            ]
            score_long = -min(scores_long)
            scores_short = [
                v["adg_short"]
                / max(v["pa_distance_std_short"], self.config["maximum_pa_distance_std_short"])
                for v in results.values()
            ]
            score_short = -min(scores_short)
        else:
            raise Exception(f"unknown score formula {self.config['score_formula']}")
        line = f"completed multisymbol iter {cfg['config_no']} "
        if self.do_long:
            line += f"- adg long {adg_mean_long:.6f} PAD long {PAD_mean_long:.6f} std long "
            line += f"{PAD_std_long:.5f} score long {score_long:.7f} "
        if self.do_short:
            line += f"- adg short {adg_mean_short:.6f} PAD short {PAD_mean_short:.6f} std short "
            line += f"{PAD_std_short:.5f} score short {score_short:.7f}"
        logging.debug(line)
        # check whether initial eval or new harmony
        if "initial_eval_key" in cfg:
            # initial eval: replace the "in_progress" placeholder scores
            self.hm[cfg["initial_eval_key"]]["long"]["score"] = score_long
            self.hm[cfg["initial_eval_key"]]["short"]["score"] = score_short
        else:
            # check if better than worst in harmony memory
            # string scores ("in_progress") map to -inf so they are never
            # selected as the worst entry
            worst_key_long = sorted(
                self.hm,
                key=lambda x: self.hm[x]["long"]["score"]
                if type(self.hm[x]["long"]["score"]) != str
                else -np.inf,
            )[-1]
            if self.do_long and score_long < self.hm[worst_key_long]["long"]["score"]:
                logging.debug(
                    f"improved long harmony, prev score "
                    + f"{self.hm[worst_key_long]['long']['score']:.7f} new score {score_long:.7f} - "
                    + " ".join(
                        [str(round_dynamic(e[1], 3)) for e in sorted(cfg["long"].items())]
                    )
                )
                self.hm[worst_key_long]["long"] = {
                    "config": deepcopy(cfg["long"]),
                    "score": score_long,
                }
                # NOTE(review): open() without a context manager leaks the
                # file handle until GC; applies to every json.dump below too
                json.dump(
                    self.hm,
                    open(f"{self.results_fpath}hm_{cfg['config_no']:06}.json", "w"),
                    indent=4,
                    sort_keys=True,
                )
            worst_key_short = sorted(
                self.hm,
                key=lambda x: self.hm[x]["short"]["score"]
                if type(self.hm[x]["short"]["score"]) != str
                else -np.inf,
            )[-1]
            if self.do_short and score_short < self.hm[worst_key_short]["short"]["score"]:
                logging.debug(
                    f"improved short harmony, prev score "
                    + f"{self.hm[worst_key_short]['short']['score']:.7f} new score {score_short:.7f} - "
                    + " ".join(
                        [str(round_dynamic(e[1], 3)) for e in sorted(cfg["short"].items())]
                    ),
                )
                self.hm[worst_key_short]["short"] = {
                    "config": deepcopy(cfg["short"]),
                    "score": score_short,
                }
                json.dump(
                    self.hm,
                    open(f"{self.results_fpath}hm_{cfg['config_no']:06}.json", "w"),
                    indent=4,
                    sort_keys=True,
                )
        # locate current best long/short in memory (string scores map to
        # +inf so pending entries are never chosen as best)
        best_key_long = sorted(
            self.hm,
            key=lambda x: self.hm[x]["long"]["score"]
            if type(self.hm[x]["long"]["score"]) != str
            else np.inf,
        )[0]
        best_key_short = sorted(
            self.hm,
            key=lambda x: self.hm[x]["short"]["score"]
            if type(self.hm[x]["short"]["score"]) != str
            else np.inf,
        )[0]
        best_config = {
            "long": deepcopy(self.hm[best_key_long]["long"]["config"]),
            "short": deepcopy(self.hm[best_key_short]["short"]["config"]),
        }
        best_config["result"] = {
            "symbol": f"{len(self.symbols)}_symbols",
            "exchange": self.config["exchange"],
            "start_date": self.config["start_date"],
            "end_date": self.config["end_date"],
        }
        # tmp_fname accumulates "_long"/"_short" suffixes depending on which
        # sides improved; <= means ties (incl. the entry just inserted above)
        # count as a new best
        tmp_fname = f"{self.results_fpath}{cfg['config_no']:06}_best_config"
        is_better = False
        if self.do_long and score_long <= self.hm[best_key_long]["long"]["score"]:
            is_better = True
            logging.info(
                f"i{cfg['config_no']} - new best config long, score {score_long:.7f} "
                + f"adg {adg_mean_long / cfg['long']['wallet_exposure_limit']:.7f} "
                + f"PAD mean {PAD_mean_long_raw:.7f} "
                + f"PAD std {PAD_std_long_raw:.5f} adg/DGstd {adg_DGstd_ratios_long_mean:.7f}"
            )
            tmp_fname += "_long"
            json.dump(
                results,
                open(f"{self.results_fpath}{cfg['config_no']:06}_result_long.json", "w"),
                indent=4,
                sort_keys=True,
            )
        if self.do_short and score_short <= self.hm[best_key_short]["short"]["score"]:
            is_better = True
            logging.info(
                f"i{cfg['config_no']} - new best config short, score {score_short:.7f} "
                + f"adg {adg_mean_short / cfg['short']['wallet_exposure_limit']:.7f} "
                + f"PAD mean {PAD_mean_short_raw:.7f} "
                + f"PAD std {PAD_std_short_raw:.5f} adg/DGstd {adg_DGstd_ratios_short_mean:.7f}"
            )
            tmp_fname += "_short"
            json.dump(
                results,
                open(f"{self.results_fpath}{cfg['config_no']:06}_result_short.json", "w"),
                indent=4,
                sort_keys=True,
            )
        if is_better:
            dump_live_config(best_config, tmp_fname + ".json")
        elif cfg["config_no"] % 25 == 0:
            # periodic heartbeat so long runs show progress in the log
            logging.info(f"i{cfg['config_no']}")
        # append full result line to the cumulative results file
        results["config_no"] = cfg["config_no"]
        with open(self.results_fpath + "all_results.txt", "a") as f:
            f.write(
                json.dumps(
                    {"config": {"long": cfg["long"], "short": cfg["short"]}, "results": results}
                )
                + "\n"
            )
        del self.unfinished_evals[id_key]
    # free the worker slot regardless of whether the multisymbol eval completed
    self.workers[wi] = None