def get_all_new_pastes(self):
    """
    Get all new pastes and set the last crawl time to now
    """
    page = 1
    oldest_timestamp = None
    new_pastes = []
    while oldest_timestamp is None or oldest_timestamp > self.__last_crawl:
        try:
            current_pastes = self._list_pastes(page)
        # Page does not exist
        except KeyError:
            break
        for paste_id in current_pastes:
            paste_json = self._get_paste(paste_id)
            # Pin to UTC on assignment as well, so comparisons against the
            # UTC-pinned __last_crawl stay consistent
            if oldest_timestamp is None or \
                    Arrow.fromtimestamp(paste_json["timestamp"]).replace(
                        tzinfo="UTC") < oldest_timestamp:
                oldest_timestamp = Arrow.fromtimestamp(
                    paste_json["timestamp"]).replace(tzinfo="UTC")
            if oldest_timestamp > self.__last_crawl:
                new_pastes.append(paste_json)
        page += 1
    self._logger.info("Found %d new pastes since %s" %
                      (len(new_pastes), self.__last_crawl.for_json()))
    self.__set_last_crawl()
    return new_pastes
def set_name(self, site: str = ''):
    self.site = site.lower()
    set1 = self.dbdata.find({
        'site': site,
        't0': {
            '$gt': self.start_timestamp()
        }
    })
    qtrhrs = set1.distinct('qtrhr')
    result = []
    for qh in sorted(qtrhrs):
        qset = self.dbdata.find({'site': site, 'qtrhr': qh})
        detail = []
        for row in qset:
            reading = self.data_obj(row)
            detail.append(reading)
        minmax = self.determine_max_min(detail)
        qtrhour = {
            'ts': Arrow.fromtimestamp(timestamp=qh).for_json(),
            'dt': Arrow.fromtimestamp(timestamp=qh),
            'min': minmax['min'],
            'max': minmax['max'],
            'avg': minmax['avg'],
        }
        result.append(qtrhour)
    #
    # Prepare for plot
    #
    dev_x = []
    dev_y = []
    labels = []
    for r in result:
        time = r['dt'].strftime('%m/%d %H:%M')
        dev_x.append(time)
        labels.append(time)
        dev_y.append(r['avg'])
    # Keep roughly one tick label in ten; guard against n < 10, where
    # math.floor(n / 10) would be 0 and crash the modulo below
    n = len(labels)
    i = n - 1
    div_factor = max(1, math.floor(n / 10))
    while i > 0:
        if i % div_factor != 0:
            labels[i] = ''
        i = i - 1
    result = {'site': self.site, 'x': dev_x, 'y': dev_y, 'labels': labels}
    self.data = result
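# A standalone sketch of the label-thinning step above, under the same
# assumption (keep roughly every (n // 10)-th tick label, blank the rest).
# The sample timestamps are illustrative only.
import math

labels = ['%02d:%02d' % (h, m) for h in range(2) for m in range(0, 60, 5)]
n = len(labels)
div_factor = max(1, math.floor(n / 10))
labels = [lab if i % div_factor == 0 else '' for i, lab in enumerate(labels)]
print(labels)  # for n == 24, every other label survives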
def user_action_stat(interval, user_out_name, song_out_name):
    # Aggregate per-user and per-song action counts over interval-day windows
    ## step 1: load the data
    base_time = 1426348800  # Unix timestamp of 2015-03-15 00:00:00 (UTC+8)
    base_time_stamp = Arrow.fromtimestamp(base_time)
    interval_seconds = interval * 24 * 3600
    parts = load_csv_as_dict('%s/data_source/%s' %
                             (PROJECT_PATH, 'mars_tianchi_user_actions.csv'))
    # A nested-defaultdict trick: missing keys default to 0.0
    user_dict = defaultdict(
        lambda: defaultdict(lambda: defaultdict(lambda: 0.0)))
    song_dict = defaultdict(
        lambda: defaultdict(lambda: defaultdict(lambda: 0.0)))
    count = 0
    ## step 2: count the actions
    for part in parts:
        user_id = part['user_id']
        song_id = part['song_id']
        action_type = part['action_type']
        gmt_create = int(part['gmt_create'])
        date_interval_belong = int(
            (Arrow.fromtimestamp(gmt_create) -
             base_time_stamp).total_seconds()) // interval_seconds
        user_dict[user_id][date_interval_belong][action_type] += 1
        song_dict[song_id][date_interval_belong][action_type] += 1
        count += 1
        if count % 1000 == 0:
            print('processed %s records' % count)
    print('total users: %s' % len(user_dict))
    print('total songs: %s' % len(song_dict))
    ## step 3: write out the feature files
    fs = open('%s/feature/%s.csv' % (PROJECT_PATH, user_out_name), 'w')
    fs.write('user_id,date_interval_%s,plays,downloads,favors\n' % interval)
    count = 0
    for user in user_dict:
        date_dict = user_dict[user]
        for date in date_dict:
            action = date_dict[date]
            fs.write('%s,%s,%s,%s,%s\n' %
                     (user, date, action['1'], action['2'], action['3']))
            count = count + 1
            if count % 1000 == 0:
                print('wrote %s rows' % count)
    fs.close()
    print('user_dict is written')
    fs = open('%s/feature/%s.csv' % (PROJECT_PATH, song_out_name), 'w')
    fs.write('song_id,date_interval_%s,plays,downloads,favors\n' % interval)
    count = 0
    for song in song_dict:
        date_dict = song_dict[song]
        for date in date_dict:
            action = date_dict[date]
            fs.write('%s,%s,%s,%s,%s\n' %
                     (song, date, action['1'], action['2'], action['3']))
            count += 1
            if count % 1000 == 0:
                print('wrote %s rows' % count)
    fs.close()
    print('song_dict is written')
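# Sanity check for the interval bucketing above: seconds elapsed since the
# base timestamp, floor-divided by the interval length in seconds, yields
# the bucket index. The values here are illustrative.
from arrow import Arrow

base = Arrow.fromtimestamp(1426348800)               # 2015-03-15 00:00 (UTC+8)
later = Arrow.fromtimestamp(1426348800 + 3 * 86400)  # three days later
assert int((later - base).total_seconds()) // (1 * 24 * 3600) == 3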
def dump(self, record, data):
    """Dump the data."""
    try:
        parent_data = dict_lookup(data, self.keys, parent=True)
        pd = parse_edtf(parent_data[self.key])
        parent_data[f"{self.key}_start"] = Arrow.fromtimestamp(
            calendar.timegm(pd.lower_strict()),
            tzinfo=utc).date().isoformat()
        parent_data[f"{self.key}_end"] = Arrow.fromtimestamp(
            calendar.timegm(pd.upper_strict()),
            tzinfo=utc).date().isoformat()
    except (KeyError, EDTFParseException):
        # The field does not exist or had wrong data
        return data  # FIXME: should log this in debug mode?
    return data
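# A minimal sketch of what the dumper above computes, assuming the
# python-edtf package ("edtf" on PyPI); the interval string and the use of
# datetime.timezone.utc are illustrative, not taken from the original module.
import calendar
from datetime import timezone

from arrow import Arrow
from edtf import parse_edtf

pd = parse_edtf("2004-06/2006-08")  # an EDTF interval
start = Arrow.fromtimestamp(calendar.timegm(pd.lower_strict()),
                            tzinfo=timezone.utc).date().isoformat()
end = Arrow.fromtimestamp(calendar.timegm(pd.upper_strict()),
                          tzinfo=timezone.utc).date().isoformat()
print(start, end)  # expected: 2004-06-01 2006-08-31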
def evaluate_ct(self):
    try:
        print("Evaluating confirmation time (ECT), pre-notification at " +
              str(self.pre_latch_notif) + " out of " +
              str(self.data.confirmation_updates))
        last_holdoff = self.ct.holdoff
        # reset the timer if no notifications received for a while
        if (self.pre_latch_notif >= self.data.confirmation_updates):
            print("ECT: Reset confirmation time.")
            self.ct.time = 2
            self.ct.holdoff = False
        result = self.ct.update(self.latched_value)
        self.pre_latch_notif += 1
        if self.ct.holdoff:
            announcement = str(
                (self.ct.delay - self.ct.time) *
                self.data.confirmation_interval
            ) + " until " + self.data.enumeration[self.latched_value]
            print("ECT: in holdoff, publishing time announcement: " +
                  announcement)
            self.publish("tweeter/time_announce", announcement)
        if last_holdoff and not self.ct.holdoff:
            announcement = "is " + self.data.enumeration[result[1]]
            print("ECT: new value or value restored: " + announcement)
            self.publish("tweeter/time_announce", announcement)
        if result[0]:
            attime = arrow.fromtimestamp(time.time()).to(
                self.data.timezone).strftime(self.data.time_format)
            self.tweet(
                self.data.message.format(self.data.enumeration[result[1]],
                                         attime))
    except Exception:
        # was: traceback.print_ext(), which does not exist
        traceback.print_exc()
        os._exit(1)
async def from_dataset(cls, client: "ProtosBot", ds: "AutoPurgeJob") -> "Job":
    """
    Create a new job from a dataset.
    """
    channel = await client.fetch_channel(ds.channel_id)
    return Job(client, channel, ds.pattern, Arrow.fromtimestamp(ds.last))
def sources():
    sources = get_collection('sources')
    limit = request.args.get('limit', 50)
    skip = request.args.get('skip', 0)
    # allow another namespace: the 'offset' keyword
    skip = skip if skip != 0 else request.args.get('offset', 0)
    keyword = request.args.get('keyword', None)
    start_urls_keyword = request.args.get('start_urls_keyword', None)
    comment_keyword = request.args.get('comment_keyword', None)
    startwith = request.args.get('startwith', None)
    coop = request.args.get('coop', None)
    sql_filter = {}
    if keyword:
        sql_filter.update({"title": {'$regex': keyword, '$options': 'i'}})
    if start_urls_keyword:
        sql_filter.update(
            {"start_urls": {'$regex': start_urls_keyword, '$options': 'i'}})
    if comment_keyword:
        sql_filter.update(
            {"comments": {'$regex': comment_keyword, '$options': 'i'}})
    if startwith:
        sql_filter.update({'_id': {'$lte': ObjectId(startwith)}})
    if coop:
        sql_filter.update({'coops': coop})
    data = []
    data.extend([
        clean_id(i) for i in sources.find(sql_filter).sort(
            'machine_name', 1).skip(int(skip)).limit(int(limit))
    ])
    for d in data:
        if d.get('latestArticleCreated'):
            last_time = Arrow.fromtimestamp(
                d['latestArticleCreated']).to('Asia/Shanghai')
            # u'最后时间' means "last seen at"
            d['latestArticleCreatedHuman'] = u'最后时间:{0}'.format(
                last_time.humanize(locale='zh_cn'))
            if (Arrow.utcnow() - last_time).days > 1:
                d['latestArticleCreatedHuman'] = \
                    u'<span class="text-danger">{}</span>'.format(
                        d['latestArticleCreatedHuman'])
    ret = {'data': data, 'total': sources.find().count()}
    return jsonify(ret)
def validate_token(token):
    if not isinstance(token, str):
        raise TypeError('token must be a `str`.')
    secret_key = get_secret_key()
    s = JSONWebSignatureSerializer(secret_key)
    try:
        data = s.loads(token.encode('utf-8'))
    except BadSignature:
        raise InvalidTokenError('Invalid token.')
    expired_at = Arrow.fromtimestamp(data['expired_at'])
    now = utcnow()
    if expired_at < now:
        raise ExpiredTokenError('Expired token.')
    return data
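# A hedged round-trip sketch for validate_token. issue_token below is
# hypothetical (not part of the original module) and must sign with the same
# key that get_secret_key() returns, otherwise loads() raises BadSignature.
# Assumes the pre-2.x itsdangerous API that validate_token itself uses.
from arrow import utcnow
from itsdangerous import JSONWebSignatureSerializer

def issue_token(ttl_seconds=3600):
    s = JSONWebSignatureSerializer(get_secret_key())
    payload = {'expired_at': utcnow().shift(seconds=ttl_seconds).float_timestamp}
    return s.dumps(payload).decode('utf-8')

token = issue_token()
assert validate_token(token)['expired_at'] > utcnow().float_timestamp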
def _reformat_json(self, dict_to_write):
    """
    In-place reformatting method for a consistent format between objects.

    :param dict dict_to_write: Formats all JSONs to write out
    :return: None, changes the JSON in place
    """
    self._logger.debug("Reformatting JSON: %s" % str(dict_to_write))
    for key in dict_to_write:
        if key == "timestamp":
            arrow_date = Arrow.fromtimestamp(
                dict_to_write[key]).replace(tzinfo=self.timezone)
            val = arrow_date.for_json()
        elif key == "author":
            author = dict_to_write[key]
            val = author if str(author).lower() \
                not in TinyWriter._unknown_authors else "Unknown"
        else:
            val = str(dict_to_write[key]).strip() \
                if self.should_strip and type(dict_to_write[key]) == str \
                else dict_to_write[key]
        if type(val) == str:
            # str.replace returns a new string; assign the result back
            # (the original discarded it)
            val = val.replace("\r\n", "\n")
        dict_to_write[key] = val
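# Illustrative only: the timestamp branch above. Arrow's .replace(tzinfo=...)
# relabels the wall-clock time rather than converting it, which is why the
# writer pins the zone the epoch was recorded in before serializing.
from arrow import Arrow

json_ready = Arrow.fromtimestamp(0).replace(tzinfo="UTC").for_json()
# e.g. '1970-01-01T00:00:00+00:00' on a host whose local zone is UTC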
def names():
    items = get_collection('items')
    if request.method == 'GET':
        name_list = items.distinct('name')
        current = []
        if os.path.exists('filter.json'):
            with open('filter.json', 'r') as f:
                current = json.load(f)
        msgs = {}
        for name in name_list:
            item = [
                i for i in items.find({
                    'name': name
                }, {
                    '_id': 0
                }).sort([('ctime', -1)]).limit(1)
            ]
            if item:
                item = item[0]
                last_time = Arrow.fromtimestamp(
                    item['ctime']).to('Asia/Shanghai')
                # u'最后时间' means "last seen at"
                msgs[name] = u'最后时间:{0}'.format(
                    last_time.humanize(locale='zh_cn'))
                if (Arrow.utcnow() - last_time).days > 1:
                    msgs[name] = u'<span class="text-danger">{}</span>'.format(
                        msgs[name])
        return jsonp({'all': name_list, 'current': current, 'msg': msgs})
    elif request.method == 'POST':
        code = 500
        if request.json:
            # The payload is encoded to UTF-8 bytes, so the file must be
            # opened in binary mode (text mode would raise a TypeError)
            with open('filter.json', 'wb') as f:
                f.write(
                    json.dumps(request.json,
                               ensure_ascii=False).encode('utf8'))
            code = 200
        return Response('', code)
def test_generate_backtest_stats(default_conf, testdatadir):
    default_conf.update({'strategy': 'DefaultStrategy'})
    StrategyResolver.load_strategy(default_conf)

    results = {'DefStrat': {
        'results': pd.DataFrame({
            "pair": ["UNITTEST/BTC", "UNITTEST/BTC",
                     "UNITTEST/BTC", "UNITTEST/BTC"],
            "profit_ratio": [0.003312, 0.010801, 0.013803, 0.002780],
            "profit_abs": [0.000003, 0.000011, 0.000014, 0.000003],
            "open_date": [Arrow(2017, 11, 14, 19, 32, 00).datetime,
                          Arrow(2017, 11, 14, 21, 36, 00).datetime,
                          Arrow(2017, 11, 14, 22, 12, 00).datetime,
                          Arrow(2017, 11, 14, 22, 44, 00).datetime],
            "close_date": [Arrow(2017, 11, 14, 21, 35, 00).datetime,
                           Arrow(2017, 11, 14, 22, 10, 00).datetime,
                           Arrow(2017, 11, 14, 22, 43, 00).datetime,
                           Arrow(2017, 11, 14, 22, 58, 00).datetime],
            "open_rate": [0.002543, 0.003003, 0.003089, 0.003214],
            "close_rate": [0.002546, 0.003014, 0.003103, 0.003217],
            "trade_duration": [123, 34, 31, 14],
            "is_open": [False, False, False, True],
            "sell_reason": [SellType.ROI, SellType.STOP_LOSS,
                            SellType.ROI, SellType.FORCE_SELL]
        }),
        'config': default_conf,
        'locks': [],
        'backtest_start_time': Arrow.utcnow().int_timestamp,
        'backtest_end_time': Arrow.utcnow().int_timestamp,
    }}
    timerange = TimeRange.parse_timerange('1510688220-1510700340')
    min_date = Arrow.fromtimestamp(1510688220)
    max_date = Arrow.fromtimestamp(1510700340)
    btdata = history.load_data(testdatadir, '1m', ['UNITTEST/BTC'],
                               timerange=timerange, fill_up_missing=True)
    stats = generate_backtest_stats(btdata, results, min_date, max_date)
    assert isinstance(stats, dict)
    assert 'strategy' in stats
    assert 'DefStrat' in stats['strategy']
    assert 'strategy_comparison' in stats
    strat_stats = stats['strategy']['DefStrat']

    assert strat_stats['backtest_start'] == min_date.datetime
    assert strat_stats['backtest_end'] == max_date.datetime
    assert strat_stats['total_trades'] == len(results['DefStrat']['results'])
    # Above sample had no losing trade
    assert strat_stats['max_drawdown'] == 0.0

    results = {'DefStrat': {
        'results': pd.DataFrame({
            "pair": ["UNITTEST/BTC", "UNITTEST/BTC",
                     "UNITTEST/BTC", "UNITTEST/BTC"],
            "profit_ratio": [0.003312, 0.010801, -0.013803, 0.002780],
            "profit_abs": [0.000003, 0.000011, -0.000014, 0.000003],
            "open_date": [Arrow(2017, 11, 14, 19, 32, 00).datetime,
                          Arrow(2017, 11, 14, 21, 36, 00).datetime,
                          Arrow(2017, 11, 14, 22, 12, 00).datetime,
                          Arrow(2017, 11, 14, 22, 44, 00).datetime],
            "close_date": [Arrow(2017, 11, 14, 21, 35, 00).datetime,
                           Arrow(2017, 11, 14, 22, 10, 00).datetime,
                           Arrow(2017, 11, 14, 22, 43, 00).datetime,
                           Arrow(2017, 11, 14, 22, 58, 00).datetime],
            "open_rate": [0.002543, 0.003003, 0.003089, 0.003214],
            "close_rate": [0.002546, 0.003014, 0.0032903, 0.003217],
            "trade_duration": [123, 34, 31, 14],
            "open_at_end": [False, False, False, True],
            "sell_reason": [SellType.ROI, SellType.STOP_LOSS,
                            SellType.ROI, SellType.FORCE_SELL]
        }),
        'config': default_conf
    }}

    # With no losing trade in the first sample, drawdown stays at its defaults
    assert strat_stats['max_drawdown'] == 0.0
    assert strat_stats['drawdown_start'] == datetime(1970, 1, 1,
                                                     tzinfo=timezone.utc)
    assert strat_stats['drawdown_end'] == datetime(1970, 1, 1,
                                                   tzinfo=timezone.utc)
    assert strat_stats['drawdown_end_ts'] == 0
    assert strat_stats['drawdown_start_ts'] == 0
    assert strat_stats['pairlist'] == ['UNITTEST/BTC']

    # Test storing stats
    filename = Path(testdatadir / 'btresult.json')
    filename_last = Path(testdatadir / LAST_BT_RESULT_FN)
    _backup_file(filename_last, copy_file=True)
    assert not filename.is_file()

    store_backtest_stats(filename, stats)

    # get the real filename (it's btresult-<date>.json)
    last_fn = get_latest_backtest_filename(filename_last.parent)
    assert re.match(r"btresult-.*\.json", last_fn)

    filename1 = (testdatadir / last_fn)
    assert filename1.is_file()
    content = filename1.read_text()
    assert 'max_drawdown' in content
    assert 'strategy' in content
    assert 'pairlist' in content

    assert filename_last.is_file()

    _clean_test_file(filename_last)
    filename1.unlink()
def _format_date(date):
    """Format the given date into ISO format."""
    arrow = Arrow.fromtimestamp(calendar.timegm(date), tzinfo=utc)
    return arrow.date().isoformat()
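# Tiny usage check: _format_date expects a UTC time.struct_time such as
# time.gmtime produces; calendar.timegm turns it back into an epoch second.
import time

assert _format_date(time.gmtime(0)) == "1970-01-01"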
def test_generate_backtest_stats(default_conf, testdatadir, tmpdir):
    default_conf.update({'strategy': CURRENT_TEST_STRATEGY})
    StrategyResolver.load_strategy(default_conf)

    results = {'DefStrat': {
        'results': pd.DataFrame({
            "pair": ["UNITTEST/BTC", "UNITTEST/BTC",
                     "UNITTEST/BTC", "UNITTEST/BTC"],
            "profit_ratio": [0.003312, 0.010801, 0.013803, 0.002780],
            "profit_abs": [0.000003, 0.000011, 0.000014, 0.000003],
            "open_date": [Arrow(2017, 11, 14, 19, 32, 00).datetime,
                          Arrow(2017, 11, 14, 21, 36, 00).datetime,
                          Arrow(2017, 11, 14, 22, 12, 00).datetime,
                          Arrow(2017, 11, 14, 22, 44, 00).datetime],
            "close_date": [Arrow(2017, 11, 14, 21, 35, 00).datetime,
                           Arrow(2017, 11, 14, 22, 10, 00).datetime,
                           Arrow(2017, 11, 14, 22, 43, 00).datetime,
                           Arrow(2017, 11, 14, 22, 58, 00).datetime],
            "open_rate": [0.002543, 0.003003, 0.003089, 0.003214],
            "close_rate": [0.002546, 0.003014, 0.003103, 0.003217],
            "trade_duration": [123, 34, 31, 14],
            "is_open": [False, False, False, True],
            "is_short": [False, False, False, False],
            "stake_amount": [0.01, 0.01, 0.01, 0.01],
            "exit_reason": [ExitType.ROI, ExitType.STOP_LOSS,
                            ExitType.ROI, ExitType.FORCE_EXIT]
        }),
        'config': default_conf,
        'locks': [],
        'final_balance': 1000.02,
        'rejected_signals': 20,
        'timedout_entry_orders': 0,
        'timedout_exit_orders': 0,
        'backtest_start_time': Arrow.utcnow().int_timestamp,
        'backtest_end_time': Arrow.utcnow().int_timestamp,
        'run_id': '123',
    }}
    timerange = TimeRange.parse_timerange('1510688220-1510700340')
    min_date = Arrow.fromtimestamp(1510688220)
    max_date = Arrow.fromtimestamp(1510700340)
    btdata = history.load_data(testdatadir, '1m', ['UNITTEST/BTC'],
                               timerange=timerange, fill_up_missing=True)
    stats = generate_backtest_stats(btdata, results, min_date, max_date)
    assert isinstance(stats, dict)
    assert 'strategy' in stats
    assert 'DefStrat' in stats['strategy']
    assert 'strategy_comparison' in stats
    strat_stats = stats['strategy']['DefStrat']

    assert strat_stats['backtest_start'] == min_date.strftime(
        DATETIME_PRINT_FORMAT)
    assert strat_stats['backtest_end'] == max_date.strftime(
        DATETIME_PRINT_FORMAT)
    assert strat_stats['total_trades'] == len(results['DefStrat']['results'])
    # Above sample had no losing trade
    assert strat_stats['max_drawdown_account'] == 0.0

    # Retry with losing trade
    results = {'DefStrat': {
        'results': pd.DataFrame({
            "pair": ["UNITTEST/BTC", "UNITTEST/BTC",
                     "UNITTEST/BTC", "UNITTEST/BTC"],
            "profit_ratio": [0.003312, 0.010801, -0.013803, 0.002780],
            "profit_abs": [0.000003, 0.000011, -0.000014, 0.000003],
            "open_date": [Arrow(2017, 11, 14, 19, 32, 00).datetime,
                          Arrow(2017, 11, 14, 21, 36, 00).datetime,
                          Arrow(2017, 11, 14, 22, 12, 00).datetime,
                          Arrow(2017, 11, 14, 22, 44, 00).datetime],
            "close_date": [Arrow(2017, 11, 14, 21, 35, 00).datetime,
                           Arrow(2017, 11, 14, 22, 10, 00).datetime,
                           Arrow(2017, 11, 14, 22, 43, 00).datetime,
                           Arrow(2017, 11, 14, 22, 58, 00).datetime],
            "open_rate": [0.002543, 0.003003, 0.003089, 0.003214],
            "close_rate": [0.002546, 0.003014, 0.0032903, 0.003217],
            "trade_duration": [123, 34, 31, 14],
            "is_open": [False, False, False, True],
            "is_short": [False, False, False, False],
            "stake_amount": [0.01, 0.01, 0.01, 0.01],
            "exit_reason": [ExitType.ROI, ExitType.ROI,
                            ExitType.STOP_LOSS, ExitType.FORCE_EXIT]
        }),
        'config': default_conf,
        'locks': [],
        'final_balance': 1000.02,
        'rejected_signals': 20,
        'timedout_entry_orders': 0,
        'timedout_exit_orders': 0,
        'backtest_start_time': Arrow.utcnow().int_timestamp,
        'backtest_end_time': Arrow.utcnow().int_timestamp,
        'run_id': '124',
    }}

    stats = generate_backtest_stats(btdata, results, min_date, max_date)
    assert isinstance(stats, dict)
    assert 'strategy' in stats
    assert 'DefStrat' in stats['strategy']
    assert 'strategy_comparison' in stats
    strat_stats = stats['strategy']['DefStrat']

    assert pytest.approx(strat_stats['max_drawdown_account']) == 1.399999e-08
    assert strat_stats['drawdown_start'] == '2017-11-14 22:10:00'
    assert strat_stats['drawdown_end'] == '2017-11-14 22:43:00'
    assert strat_stats['drawdown_end_ts'] == 1510699380000
    assert strat_stats['drawdown_start_ts'] == 1510697400000
    assert strat_stats['pairlist'] == ['UNITTEST/BTC']

    # Test storing stats
    filename = Path(tmpdir / 'btresult.json')
    filename_last = Path(tmpdir / LAST_BT_RESULT_FN)
    _backup_file(filename_last, copy_file=True)
    assert not filename.is_file()

    store_backtest_stats(filename, stats)

    # get the real filename (it's btresult-<date>.json)
    last_fn = get_latest_backtest_filename(filename_last.parent)
    assert re.match(r"btresult-.*\.json", last_fn)

    filename1 = Path(tmpdir / last_fn)
    assert filename1.is_file()
    content = filename1.read_text()
    assert 'max_drawdown_account' in content
    assert 'strategy' in content
    assert 'pairlist' in content

    assert filename_last.is_file()

    _clean_test_file(filename_last)
    filename1.unlink()
def __set_last_crawl(self, timestamp=None):
    # A default of `str(time())` would be evaluated once, at definition
    # time; use a None sentinel and compute "now" on every call instead.
    if timestamp is None:
        timestamp = time()
    if type(timestamp) == Arrow:
        self.__last_crawl = timestamp
    else:
        self.__last_crawl = Arrow.fromtimestamp(
            str(timestamp)).replace(tzinfo="UTC")
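# Why the None sentinel above matters, as a standalone demonstration:
# a default such as `ts=str(time())` is evaluated once, when the function
# is defined, so later calls silently reuse the stale import-time value.
from time import time

def stale(ts=str(time())):  # frozen at definition (import) time
    return ts

def fresh(ts=None):         # computed anew on every call
    return str(time()) if ts is None else ts

assert stale() == stale()   # always the same value, however much time passes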
def process_data(self):
    try:
        logger.info("Processing WindsSpots data...")
        result = requests.get(
            "http://api.windspots.com/windmobile/stationinfos?allStation=true",
            timeout=(self.connect_timeout, self.read_timeout))
        for windspots_station in result.json()['stationInfo']:
            station_id = None
            try:
                windspots_id = windspots_station['@id'][10:]
                station_id = self.get_station_id(windspots_id)
                station = self.save_station(
                    station_id,
                    windspots_station['@shortName'],
                    windspots_station['@name'],
                    windspots_station['@wgs84Latitude'],
                    windspots_station['@wgs84Longitude'],
                    windspots_station['@maintenanceStatus'],
                    altitude=windspots_station['@altitude'],
                    url=urllib.parse.urljoin(self.provider_url, "/spots"))

                try:
                    # Asking 2 days of data
                    result = requests.get(
                        "http://api.windspots.com/windmobile/stationdatas/"
                        "windspots:{windspots_id}/60".format(
                            windspots_id=windspots_id),
                        timeout=(self.connect_timeout, self.read_timeout))
                    try:
                        windspots_measure = result.json()
                    except ValueError:
                        raise ProviderException(
                            "Action=Data returned an invalid json response")

                    measures_collection = self.measures_collection(station_id)
                    new_measures = []

                    try:
                        key = arrow.get(
                            windspots_measure['@lastUpdate']).timestamp
                    except arrow.parser.ParserError:
                        raise ProviderException(
                            "Unable to parse measure date: '{0}'".format(
                                windspots_measure['@lastUpdate']))

                    wind_direction_last = windspots_measure[
                        'windDirectionChart']['serie']['points'][0]
                    wind_direction_key = int(wind_direction_last['date']) / 1000
                    if key != wind_direction_key:
                        logger.warn(
                            "{name} ({id}): wind direction measure "
                            "'{direction}' is inconsistent with key '{key}'"
                            .format(
                                name=station['short'],
                                id=station_id,
                                key=Arrow.fromtimestamp(key).format(
                                    'DD-MM-YY HH:mm:ssZZ'),
                                direction=Arrow.fromtimestamp(
                                    wind_direction_key).format(
                                        'DD-MM-YY HH:mm:ssZZ')))

                    if not measures_collection.find_one(key):
                        try:
                            measure = self.create_measure(
                                key,
                                wind_direction_last['value'],
                                windspots_measure.get('windAverage'),
                                windspots_measure.get('windMax'),
                                windspots_measure.get('airTemperature'),
                                windspots_measure.get('airHumidity'))
                            new_measures.append(measure)
                        except ProviderException as e:
                            logger.warn(
                                "Error while processing measure '{0}' for "
                                "station '{1}': {2}".format(key, station_id, e))
                        except Exception as e:
                            logger.exception(
                                "Error while processing measure '{0}' for "
                                "station '{1}': {2}".format(key, station_id, e))
                            self.raven_client.captureException()

                    self.insert_new_measures(measures_collection, station,
                                             new_measures, logger)
                except ProviderException as e:
                    logger.warn("Error while processing measure for station "
                                "'{0}': {1}".format(station_id, e))
                except Exception as e:
                    logger.exception("Error while processing measure for "
                                     "station '{0}': {1}".format(station_id, e))
                    self.raven_client.captureException()

                self.add_last_measure(station_id)
            except ProviderException as e:
                logger.warn("Error while processing station '{0}': {1}".format(
                    station_id, e))
            except Exception as e:
                logger.exception("Error while processing station '{0}': {1}"
                                 .format(station_id, e))
                self.raven_client.captureException()
    except ProviderException as e:
        logger.warn("Error while processing Windspots: {0}".format(e))
    except Exception as e:
        logger.exception("Error while processing Windspots: {0}".format(e))
        self.raven_client.captureException()

    logger.info("Done !")
def generate_strategy_stats(btdata: Dict[str, DataFrame],
                            strategy: str,
                            content: Dict[str, Any],
                            min_date: Arrow, max_date: Arrow,
                            market_change: float) -> Dict[str, Any]:
    """
    :param btdata: Backtest data
    :param strategy: Strategy name
    :param content: Backtest result data in the format:
                    {'results': results, 'config': config}
    :param min_date: Backtest start date
    :param max_date: Backtest end date
    :param market_change: float indicating the market change
    :return: Dictionary containing results per strategy and a strategy summary.
    """
    results: Dict[str, DataFrame] = content['results']
    if not isinstance(results, DataFrame):
        return {}
    config = content['config']
    max_open_trades = min(config['max_open_trades'], len(btdata.keys()))
    starting_balance = config['dry_run_wallet']
    stake_currency = config['stake_currency']

    pair_results = generate_pair_metrics(btdata,
                                         stake_currency=stake_currency,
                                         starting_balance=starting_balance,
                                         results=results, skip_nan=False)
    sell_reason_stats = generate_sell_reason_stats(
        starting_balance=starting_balance, results=results)
    left_open_results = generate_pair_metrics(
        btdata, stake_currency=stake_currency,
        starting_balance=starting_balance,
        results=results.loc[results['is_open']], skip_nan=True)
    daily_stats = generate_daily_stats(results)
    trade_stats = generate_trading_stats(results)
    best_pair = max(
        [pair for pair in pair_results if pair['key'] != 'TOTAL'],
        key=lambda x: x['profit_sum']) if len(pair_results) > 1 else None
    worst_pair = min(
        [pair for pair in pair_results if pair['key'] != 'TOTAL'],
        key=lambda x: x['profit_sum']) if len(pair_results) > 1 else None
    results['open_timestamp'] = results['open_date'].astype(int64) // 1e6
    results['close_timestamp'] = results['close_date'].astype(int64) // 1e6

    # close_timestamp is in milliseconds; convert back to seconds for Arrow
    max_date_real = Arrow.fromtimestamp(
        max(results['close_timestamp']) / 1000).to('utc') \
        if not results['close_timestamp'].empty else max_date
    ended_early = False
    if (max_date_real < max_date):
        max_date = max_date_real
        ended_early = True
    backtest_days = (max_date - min_date).days
    strat_stats = {
        'trades': results.to_dict(orient='records'),
        'config': config,
        'locks': [lock.to_json() for lock in content['locks']],
        'best_pair': best_pair,
        'worst_pair': worst_pair,
        'results_per_pair': pair_results,
        'sell_reason_summary': sell_reason_stats,
        'left_open_trades': left_open_results,
        'total_trades': len(results),
        'total_volume': float(results['stake_amount'].sum()),
        'avg_stake_amount': results['stake_amount'].mean()
                            if len(results) > 0 else 0,
        'profit_mean': results['profit_ratio'].mean()
                       if len(results) > 0 else 0,
        'profit_median': results['profit_ratio'].median()
                         if len(results) > 0 else 0,
        'profit_total': results['profit_abs'].sum() / starting_balance,
        'profit_total_abs': results['profit_abs'].sum(),
        'backtest_start': min_date.datetime,
        'backtest_start_ts': min_date.int_timestamp * 1000,
        'backtest_end': max_date.datetime,
        'backtest_end_ts': max_date.int_timestamp * 1000,
        'backtest_days': backtest_days,
        'early_end': '* ' if ended_early else '',
        'win_loss_ratio': round(
            len(results[results['profit_abs'] > 0]) /
            len(results[results['profit_abs'] < 0]), 4)
            if len(results[results['profit_abs'] < 0]) > 0 else 0.0,
        'backtest_run_start_ts': content['backtest_start_time'],
        'backtest_run_end_ts': content['backtest_end_time'],
        'trades_per_day': round(len(results) / backtest_days, 2)
                          if backtest_days > 0 else 0,
        'market_change': market_change,
        'pairlist': list(btdata.keys()),
        'stake_amount': config['stake_amount'],
        'stake_currency': config['stake_currency'],
        'stake_currency_decimals': decimals_per_coin(config['stake_currency']),
        'starting_balance': starting_balance,
        'dry_run_wallet': starting_balance,
        'final_balance': content['final_balance'],
        'max_open_trades': max_open_trades,
        'max_open_trades_setting': (config['max_open_trades']
                                    if config['max_open_trades'] != float('inf')
                                    else -1),
        'timeframe': config['timeframe'],
        'timerange': config.get('timerange', ''),
        'enable_protections': config.get('enable_protections', False),
        'strategy_name': strategy,
        # Parameters relevant for backtesting
        'stoploss': config['stoploss'],
        'trailing_stop': config.get('trailing_stop', False),
        'trailing_stop_positive': config.get('trailing_stop_positive'),
        'trailing_stop_positive_offset': config.get(
            'trailing_stop_positive_offset', 0.0),
        'trailing_only_offset_is_reached': config.get(
            'trailing_only_offset_is_reached', False),
        'use_custom_stoploss': config.get('use_custom_stoploss', False),
        'minimal_roi': config['minimal_roi'],
        'use_sell_signal': config['ask_strategy']['use_sell_signal'],
        'sell_profit_only': config['ask_strategy']['sell_profit_only'],
        'sell_profit_offset': config['ask_strategy']['sell_profit_offset'],
        'ignore_roi_if_buy_signal': config['ask_strategy'][
            'ignore_roi_if_buy_signal'],
        **daily_stats,
        **trade_stats
    }

    try:
        max_drawdown, _, _, _, _ = calculate_max_drawdown(
            results, value_col='profit_ratio')
        drawdown_abs, drawdown_start, drawdown_end, high_val, low_val = \
            calculate_max_drawdown(results, value_col='profit_abs')
        strat_stats.update({
            'max_drawdown': max_drawdown,
            'max_drawdown_pct': drawdown_abs / (high_val + starting_balance),
            'max_drawdown_abs': drawdown_abs,
            'drawdown_start': drawdown_start,
            'drawdown_start_ts': drawdown_start.timestamp() * 1000,
            'drawdown_end': drawdown_end,
            'drawdown_end_ts': drawdown_end.timestamp() * 1000,
            'max_drawdown_low': low_val + starting_balance,
            'max_drawdown_high': high_val + starting_balance,
        })

        csum_min, csum_max = calculate_csum(results, starting_balance)
        strat_stats.update({'csum_min': csum_min, 'csum_max': csum_max})
    except ValueError:
        strat_stats.update({
            'max_drawdown': 0.0,
            'max_drawdown_pct': 0.0,
            'max_drawdown_abs': 0.0,
            'max_drawdown_low': 0.0,
            'max_drawdown_high': 0.0,
            'drawdown_start': datetime(1970, 1, 1, tzinfo=timezone.utc),
            'drawdown_start_ts': 0,
            'drawdown_end': datetime(1970, 1, 1, tzinfo=timezone.utc),
            'drawdown_end_ts': 0,
            'csum_min': 0,
            'csum_max': 0
        })

    return strat_stats