def test_when_several_backups_only_backup_longest_lifetime_oldest_is_daily(mocker, given_stopped_container):
    mocker.patch('lxd_backup.cli.should_backup', return_value=True)
    container_name = given_stopped_container
    s3 = S3('apeyrard.com-test-bucket')
    unexpected_file = '_'.join([Arrow.utcnow().format('YYYY-MM-DD'),
                                'until',
                                Arrow.utcnow().shift(months=1).format('YYYY-MM-DD'),
                                container_name])
    expected_file = '_'.join([Arrow.utcnow().format('YYYY-MM-DD'),
                              'until',
                              Arrow.utcnow().shift(days=365).format('YYYY-MM-DD'),
                              container_name])
    sys.argv = ['', '-c', 'tests/test_files/s3/several_backups_same_day_oldest_is_daily.json']
    main()
    with open('tmp/before_script_was_run') as f:
        before_script_result = f.readline() == 'True\n'
    with open('tmp/after_script_was_run') as f:
        after_script_result = f.readline() == 'True\n'
    file_exists = s3.exists(expected_file)
    file_does_not_exist = not s3.exists(unexpected_file)
    s3.delete_all()
    assert file_exists
    assert before_script_result
    assert after_script_result
    assert file_does_not_exist

def test_when_no_scripts_no_exceptions(mocker, given_stopped_container):
    container_name = given_stopped_container
    sys.argv = ['', '-c', 'tests/test_files/dir/nominal_without_scripts.json']
    main()
    expected_file = '_'.join([Arrow.utcnow().format('YYYY-MM-DD'),
                              'until',
                              Arrow.utcnow().shift(days=1).format('YYYY-MM-DD'),
                              container_name])
    expected_hash_file = ''.join([expected_file, '.md5'])
    file_exists = os.path.isfile(os.path.join('/tmp/images', expected_file))
    hash_exists = os.path.isfile(os.path.join('/tmp/images', expected_hash_file))
    Dir('/tmp/images').delete_all()
    assert file_exists
    assert hash_exists

def warn(page):
    now_string = Arrow.utcnow().strftime("%x %X")
    err = traceback.format_exception(*sys.exc_info())
    with open("errlog", "a") as fh:
        builtins.print(now_string, repr(page.title), ":", file=fh)
        for line in err:
            builtins.print("\t" + line.rstrip(), file=fh)

def add_to_upload_queue(self, database: str, table: str, raw_row: Row) -> None:
    """
    Adds a row to the upload queue. The queue will be uploaded if the queue size is larger than
    the threshold specified in the __init__.

    Args:
        database: The database to upload the Raw object to
        table: The table to upload the Raw object to
        raw_row: The row object
    """
    with self.lock:
        # Ensure that the dicts have the correct keys
        if database not in self.upload_queue:
            self.upload_queue[database] = dict()
        if table not in self.upload_queue[database]:
            self.upload_queue[database][table] = []

        # Append row to queue
        self.upload_queue[database][table].append(
            TimestampedObject(payload=raw_row, created=Arrow.utcnow()))
        self.upload_queue_size += 1
        self.rows_queued.labels(f"{database}:{table}").inc()

        self.queue_size.set(self.upload_queue_size)

        self._check_triggers()

def manual_watering(self, watering_request):
    # pause normal schedule
    jobs_paused = self.pause_schedule()

    start, last_duration_seconds = Arrow.utcnow(), 5
    start_buffer_seconds = 5

    # for every station, schedule watering for the specified duration;
    # stations are run serially
    for station, duration in watering_request.items():
        station_id = int(station)
        job_start = start.shift(seconds=last_duration_seconds)
        dt = job_start.format("YYYY-MM-DDTHH:mm:ssZZ").replace("-00:00", "+00:00")
        args = {"datetime": dt, "station": station_id, "fixed_duration": duration, "manual": 1}
        self.bg_scheduler.add_job(self.water, "date", run_date=job_start.datetime, args=[args])
        last_duration_seconds = duration * 60

    # reschedule the original schedule after all stations have watered
    job_start = start.shift(seconds=last_duration_seconds + start_buffer_seconds)
    self.bg_scheduler.add_job(self.resume_schedule, "date", run_date=job_start.datetime)

    # check that the schedule contains: paused jobs, manual watering jobs, and the extra job to resume paused jobs
    if len(self.bg_scheduler.get_jobs()) == (jobs_paused + len(watering_request) + 1):
        return True
    return False

def get(service_name: str):
    """Get the service schedule and return status and next schedule date/time."""
    current_app.logger.info('<ServiceStatus.get')
    response, status = STATUS_SERVICE.check_status(service_name, Arrow.utcnow()), HTTPStatus.OK
    current_app.logger.debug('>ServiceStatus.get')
    return jsonify(response), status

def parse_stamps(self, expr=STAMP_RE, fmt='%H:%M, %d %B %Y (%Z)'):
    stamps = []
    algo = self.archiver.config['algo']
    try:
        maxage = str2time(re.search(r"^old\((\w+)\)$", algo).group(1))
    except AttributeError as e:
        e.args = ("Malformed archive algorithm",)
        raise ArchiveError(e)
    for thread in self.threads:
        if mwp_parse(thread['header']).get(0).level != 2:
            # the header is not level 2
            stamps = []
            continue
        for stamp in expr.finditer(thread['content']):
            # This for loop can probably be optimised, but ain't nobody
            # got time fo' dat
            # if stamp.group(1) in MONTHS:
            try:
                stamps.append(Arrow.strptime(stamp.group(0), fmt))
            except ValueError:
                # Invalid stamps should not be parsed, ever
                continue
        if stamps:
            # The most recent stamp should be used to see if we should archive
            most_recent = max(stamps)
            thread['stamp'] = most_recent
            thread['oldenough'] = Arrow.utcnow() - most_recent > maxage
        else:
            pass  # No stamps were found, abandon thread
        stamps = []

async def daemon_before(self) -> None:
    """
    Call `daemon_loop` immediately, then block the loop until the next-up UTC midnight.

    The first iteration is invoked directly such that synchronisation happens immediately after daemon start.
    We then calculate the time until the next-up midnight and sleep before letting `daemon_loop` begin.
    """
    log.trace("Daemon before: performing start-up iteration.")
    await self.daemon_loop()

    log.trace("Daemon before: calculating time to sleep before loop begins.")
    now = Arrow.utcnow()

    # The actual midnight moment is offset into the future to prevent issues with imprecise sleep.
    tomorrow = now.shift(days=1)
    midnight = tomorrow.replace(hour=0, minute=1, second=0, microsecond=0)

    sleep_secs = (midnight - now).total_seconds()
    log.trace(f"Daemon before: sleeping {sleep_secs} seconds before next-up midnight: {midnight}.")

    await asyncio.sleep(sleep_secs)

async def maybe_rotate_icons(self) -> None:
    """
    Call `rotate_icons` if the configured amount of time has passed since last rotation.

    We offset the calculated time difference into the future to avoid off-by-a-little-bit errors.
    Because there is work to be done before the timestamp is read and written, the next read will
    likely commence slightly under 24 hours after the last write.
    """
    log.debug("Checking whether it's time for icons to rotate.")

    last_rotation_timestamp = await self.cache_information.get("last_rotation_timestamp")

    if last_rotation_timestamp is None:  # Maiden case ~ never rotated.
        await self.rotate_icons()
        return

    last_rotation = Arrow.utcfromtimestamp(last_rotation_timestamp)
    difference = (Arrow.utcnow() - last_rotation) + timedelta(minutes=5)

    log.trace(f"Icons last rotated at {last_rotation} (difference: {difference}).")

    if difference.days >= BrandingConfig.cycle_frequency:
        await self.rotate_icons()

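# Standalone sketch of the grace-offset comparison described in the docstring above.
# `cycle_frequency` and the stored rotation time are example values, not the cog's real
# configuration; only the arithmetic is the point.
from datetime import timedelta
from arrow import Arrow

cycle_frequency = 1  # rotate roughly once a day (assumed value)
last_rotation = Arrow.utcnow().shift(hours=-23, minutes=-58)
difference = (Arrow.utcnow() - last_rotation) + timedelta(minutes=5)
print(difference.days >= cycle_frequency)  # True: the 5-minute offset absorbs the slightly-early read
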
def setUp(self):
    self.now = Arrow.utcnow()
    self.start = self.now
    self.end = self.now.shift(days=15)
    self.before_start = self.now.shift(days=-1)
    self.after_end = self.end.shift(days=1)
    self.dtr = DateTimeRange(self.start, self.end)
    self.left_open_dtr = DateTimeRange(None, self.end)
    self.right_open_dtr = DateTimeRange(self.start, None)

def sources():
    sources = get_collection('sources')
    limit = request.args.get('limit', 50)
    skip = request.args.get('skip', 0)
    # allow 'offset' as an alternative name for 'skip'
    skip = skip if skip != 0 else request.args.get('offset', 0)
    keyword = request.args.get('keyword', None)
    start_urls_keyword = request.args.get('start_urls_keyword', None)
    comment_keyword = request.args.get('comment_keyword', None)
    startwith = request.args.get('startwith', None)
    coop = request.args.get('coop', None)
    sql_filter = {}
    if keyword:
        sql_filter.update({"title": {'$regex': keyword, '$options': 'i'}})
    if start_urls_keyword:
        sql_filter.update({"start_urls": {'$regex': start_urls_keyword, '$options': 'i'}})
    if comment_keyword:
        sql_filter.update({"comments": {'$regex': comment_keyword, '$options': 'i'}})
    if startwith:
        sql_filter.update({'_id': {'$lte': ObjectId(startwith)}})
    if coop:
        sql_filter.update({'coops': coop})
    data = []
    data.extend([
        clean_id(i) for i in sources.find(sql_filter)
        .sort('machine_name', 1).skip(int(skip)).limit(int(limit))
    ])
    for d in data:
        if d.get('latestArticleCreated'):
            last_time = Arrow.fromtimestamp(d['latestArticleCreated']).to('Asia/Shanghai')
            # u'最后时间' means "latest time"
            d['latestArticleCreatedHuman'] = u'最后时间:{0}'.format(
                last_time.humanize(locale='zh_cn'))
            if (Arrow.utcnow() - last_time).days > 1:
                d['latestArticleCreatedHuman'] = u'<span class="text-danger">{}</span>'.format(
                    d['latestArticleCreatedHuman'])
    ret = {'data': data, 'total': sources.find().count()}
    return jsonify(ret)

def start_urls_hook(self, urls):
    new_urls = []
    for url in urls:
        if '{{y}}' in url:
            now = Arrow.utcnow().to('Asia/Shanghai')
            url = (url.replace('{{y}}', now.format('YY'))
                      .replace('{{m}}', now.format('MM'))
                      .replace('{{d}}', now.format('DD')))
        new_urls.append(url)
    return new_urls

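# Standalone illustration of the template substitution in start_urls_hook above.
# The URL is a made-up example, not one from the project; {{y}}/{{m}}/{{d}} are
# replaced with the current two-digit year, month and day in Asia/Shanghai time.
from arrow import Arrow

now = Arrow.utcnow().to('Asia/Shanghai')
url = 'http://example.com/{{y}}/{{m}}/{{d}}/list.html'
url = (url.replace('{{y}}', now.format('YY'))
          .replace('{{m}}', now.format('MM'))
          .replace('{{d}}', now.format('DD')))
print(url)  # e.g. http://example.com/25/06/15/list.html
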
def __init__(self, loan: int, interest: float, starts_on: Optional[Arrow] = None):
    self.loan = loan
    self.interest = 1 + (interest / 100)
    if starts_on is not None:
        self.starts_on = starts_on
    else:
        self.starts_on = Arrow.utcnow()

def utcnow():
    '''Returns an :class:`Arrow <arrow.Arrow>` object, representing "now" in UTC time.

    Usage::

        >>> import arrow
        >>> arrow.utcnow()
        <Arrow [2013-05-08T05:19:07.018993+00:00]>
    '''

    return Arrow.utcnow()

def _set_cached_df(self, pair: str, timeframe: str, dataframe: DataFrame) -> None:
    """
    Store cached Dataframe.
    Using private method as this should never be used by a user
    (but the class is exposed via `self.dp` to the strategy)
    :param pair: pair to get the data for
    :param timeframe: Timeframe to get data for
    :param dataframe: analyzed dataframe
    """
    self.__cached_pairs[(pair, timeframe)] = (dataframe, Arrow.utcnow().datetime)

async def rotate_icons(self) -> bool:
    """
    Choose and apply the next-up icon in rotation.

    We keep track of the amount of times each icon has been used. The values in `cache_icons`
    can be understood to be iteration IDs. When an icon is chosen & applied, we bump its count,
    pushing it into the next iteration.

    Once the current iteration (lowest count in the cache) depletes, we move onto the next iteration.

    In the case that there is only 1 icon in the rotation and it has already been applied, do nothing.

    Return a boolean indicating whether a new icon was applied successfully.
    """
    log.debug("Rotating icons.")

    state = await self.cache_icons.to_dict()
    log.trace(f"Total icons in rotation: {len(state)}.")

    if not state:  # This would only happen if rotation not initiated, but we can handle gracefully.
        log.warning("Attempted icon rotation with an empty icon cache. This indicates wrong logic.")
        return False

    if len(state) == 1 and 1 in state.values():
        log.debug("Aborting icon rotation: only 1 icon is available and it has already been applied.")
        return False

    current_iteration = min(state.values())  # Choose iteration to draw from.
    options = [download_url for download_url, times_used in state.items()
               if times_used == current_iteration]

    log.trace(f"Choosing from {len(options)} icons in iteration {current_iteration}.")
    next_icon = random.choice(options)

    success = await self.apply_asset(AssetType.ICON, next_icon)
    if success:
        await self.cache_icons.increment(next_icon)  # Push the icon into the next iteration.

        timestamp = Arrow.utcnow().timestamp()
        await self.cache_information.set("last_rotation_timestamp", timestamp)

    return success

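# Plain-dict sketch of the rotation bookkeeping described in the docstring above:
# the lowest count marks the current iteration, an icon is drawn from that set,
# and its count is bumped. Names and values here are illustrative, not the cog's cache API.
import random

state = {"icon_a.png": 1, "icon_b.png": 0, "icon_c.png": 0}
current_iteration = min(state.values())                 # lowest count = current iteration
options = [url for url, used in state.items() if used == current_iteration]
next_icon = random.choice(options)                      # icon_b or icon_c
state[next_icon] += 1                                   # push it into the next iteration
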
def route_planner(point_list, start_point, start_time=None):
    """
    Plan an itinerary from the selected scenic points; this is the core function of this module.

    :param point_list: list of ScenicPoint instances
    :param start_point: a ScenicPoint instance
    :param start_time: optional Arrow start time (defaults to Arrow.utcnow())
    :return: list of TourPlan instances
    """
    if not start_time:
        start_time = Arrow.utcnow()
    pls = _gen_seq(point_list, start_point)
    route_mat = get_route_matrix(pls[0][:-1])
    for p_seq in pls:
        plan = TourPlan(start_time, p_seq)
        cost = _calc_plan_cost(plan, route_mat)

def test_when_use_config_file_to_export(mocker, given_stopped_container):
    container_name = given_stopped_container
    sys.argv = ['', '-c', 'tests/test_files/dir/nominal.json']
    main()
    expected_file = '_'.join([Arrow.utcnow().format('YYYY-MM-DD'),
                              'until',
                              Arrow.utcnow().shift(days=1).format('YYYY-MM-DD'),
                              container_name])
    expected_hash_file = ''.join([expected_file, '.md5'])
    with open('tmp/before_script_was_run') as f:
        before_script_result = f.readline() == 'True\n'
    with open('tmp/after_script_was_run') as f:
        after_script_result = f.readline() == 'True\n'
    file_exists = os.path.isfile(os.path.join('/tmp/images', expected_file))
    hash_exists = os.path.isfile(os.path.join('/tmp/images', expected_hash_file))
    Dir('/tmp/images').delete_all()
    assert file_exists
    assert hash_exists
    assert before_script_result
    assert after_script_result

def names():
    items = get_collection('items')
    if request.method == 'GET':
        name_list = items.distinct('name')
        current = []
        if os.path.exists('filter.json'):
            with open('filter.json', 'r') as f:
                current = json.load(f)
        msgs = {}
        for name in name_list:
            item = [i for i in items.find({'name': name}, {'_id': 0})
                    .sort([('ctime', -1)]).limit(1)]
            if item:
                item = item[0]
                last_time = Arrow.fromtimestamp(item['ctime']).to('Asia/Shanghai')
                # u'最后时间' means "latest time"
                msgs[name] = u'最后时间:{0}'.format(last_time.humanize(locale='zh_cn'))
                if (Arrow.utcnow() - last_time).days > 1:
                    msgs[name] = u'<span class="text-danger">{}</span>'.format(msgs[name])
        return jsonp({'all': name_list, 'current': current, 'msg': msgs})
    elif request.method == 'POST':
        code = 500
        if request.json:
            with open('filter.json', 'w') as f:
                f.write(json.dumps(request.json, ensure_ascii=False).encode('utf8'))
            code = 200
        return Response('', code)

import locale
import re
from datetime import timedelta

from arrow import Arrow
from ceterach.api import MediaWiki
from ceterach.page import Page
from ceterach import exceptions as exc
from passwords import lcsb3
import mwparserfromhell as mwp

API_URL = "https://en.wikipedia.org/w/api.php"
LOGIN_INFO = "Lowercase sigmabot III", lcsb3
SHUTOFF = "User:Lowercase sigmabot III/Shutoff"
ARCHIVE_TPL = "User:MiszaBot/config"

locale.setlocale(locale.LC_ALL, "en_US.utf8")
STAMP_RE = re.compile(r"\d\d:\d\d, \d{1,2} (\w*?) \d\d\d\d \(UTC\)")
THE_FUTURE = Arrow.utcnow() + timedelta(365)
MONTHS = (None, "January", "February", "March", "April", "May", "June",
          "July", "August", "September", "October", "November", "December")


class ArchiveError(exc.CeterachError):
    """Generic base class for archive exceptions"""


class ArchiveSecurityError(ArchiveError):
    """Archive is not a subpage of page being archived and key not specified (or incorrect)."""


if True:

def day():
    return int(Arrow.utcnow().format('DD'))

def today():
    return Arrow.utcnow().format('YYYY-MM-DD')

def get_date_from_lifetime(lifetime):
    lifetime = defaultdict(lambda: 0, lifetime)
    return Arrow.utcnow().shift(days=lifetime['days'],
                                weeks=lifetime['weeks'],
                                months=lifetime['months'],
                                years=lifetime['years']).format('YYYY-MM-DD')

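# Self-contained sketch of the same idea as get_date_from_lifetime above: wrapping the
# input in a defaultdict lets a partial lifetime such as {'weeks': 2} omit the other units.
from collections import defaultdict
from arrow import Arrow

lifetime = defaultdict(lambda: 0, {'weeks': 2})
expiry = Arrow.utcnow().shift(days=lifetime['days'], weeks=lifetime['weeks'],
                              months=lifetime['months'], years=lifetime['years'])
print(expiry.format('YYYY-MM-DD'))  # two weeks from today
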
def month():
    return int(Arrow.utcnow().format('MM'))

def weekday():
    return Arrow.utcnow().weekday()

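# Quick self-contained check of what the small helpers above evaluate to, using
# Arrow directly (the assertions mirror the helpers' return values).
from arrow import Arrow

now = Arrow.utcnow()
assert 1 <= int(now.format('DD')) <= 31        # day()
assert 1 <= int(now.format('MM')) <= 12        # month()
assert len(now.format('YYYY-MM-DD')) == 10     # today()
assert 0 <= now.weekday() <= 6                 # weekday(): Monday is 0
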
def nation_query(self, variable='A020101', start_year='1949', rowcode='zb', colcode='sj',
                 proxy=False, try_times=20):
    wds = StatsGov.to_url_str([{'wdcode': rowcode, 'valuecode': variable}])
    dfwds = StatsGov.to_url_str([{'wdcode': colcode, 'valuecode': ''.join([start_year, '-'])}])
    retrive_url = self._stats_gov_url_template.format('QueryData', 'hgnd', rowcode, colcode, wds, dfwds)
    for i in range(try_times):
        if proxy:
            signal, r = self.scrape(retrive_url, self._proxy_manager.random_proxy)
        else:
            signal, r = self.scrape(retrive_url)
        if signal:
            break
    if not signal:
        print('Can not retrieve url!!!')
        raise Exception
    return StatsGov.json_to_data(
        r.json(),
        condition={'zb': variable,
                   'sj': range(int(start_year), Arrow.utcnow().year + 1)})

def get(*args, **kwargs):
    '''Returns an :class:`Arrow <arrow.Arrow>` object based on flexible inputs.

    Usage::

        >>> import arrow

    **No inputs** to get current UTC time::

        >>> arrow.get()
        <Arrow [2013-05-08T05:51:43.316458+00:00]>

    **One str**, **float**, or **int**, convertible to a floating-point timestamp, to get that timestamp in UTC::

        >>> arrow.get(1367992474.293378)
        <Arrow [2013-05-08T05:54:34.293378+00:00]>

        >>> arrow.get(1367992474)
        <Arrow [2013-05-08T05:54:34+00:00]>

        >>> arrow.get('1367992474.293378')
        <Arrow [2013-05-08T05:54:34.293378+00:00]>

        >>> arrow.get('1367992474')
        <Arrow [2013-05-08T05:54:34+00:00]>

    **One str**, convertible to a timezone, or **tzinfo**, to get the current time in that timezone::

        >>> arrow.get('local')
        <Arrow [2013-05-07T22:57:11.793643-07:00]>

        >>> arrow.get('US/Pacific')
        <Arrow [2013-05-07T22:57:15.609802-07:00]>

        >>> arrow.get('-07:00')
        <Arrow [2013-05-07T22:57:22.777398-07:00]>

        >>> arrow.get(tz.tzlocal())
        <Arrow [2013-05-07T22:57:28.484717-07:00]>

    **One** naive **datetime**, to get that datetime in UTC::

        >>> arrow.get(datetime(2013, 5, 5))
        <Arrow [2013-05-05T00:00:00+00:00]>

    **One** aware **datetime**, to get that datetime::

        >>> arrow.get(datetime(2013, 5, 5, tzinfo=tz.tzlocal()))
        <Arrow [2013-05-05T00:00:00-07:00]>

    **Two** arguments, a naive or aware **datetime**, and a timezone expression (as above)::

        >>> arrow.get(datetime(2013, 5, 5), 'US/Pacific')
        <Arrow [2013-05-05T00:00:00-07:00]>

    **Two** arguments, both **str**, to parse the first according to the format of the second::

        >>> arrow.get('2013-05-05 12:30:45', 'YYYY-MM-DD HH:mm:ss')
        <Arrow [2013-05-05T12:30:45+00:00]>

    **Three or more** arguments, as for the constructor of a **datetime**::

        >>> arrow.get(2013, 5, 5, 12, 30, 45)
        <Arrow [2013-05-05T12:30:45+00:00]>
    '''

    arg_count = len(args)

    if arg_count == 0:
        return Arrow.utcnow()

    if arg_count == 1:
        arg = args[0]

        timestamp = None
        try:
            timestamp = float(arg)
        except:
            pass

        # (int), (float), (str(int)) or (str(float)) -> from timestamp.
        if timestamp is not None:
            return Arrow.utcfromtimestamp(timestamp)

        # (datetime) -> from datetime.
        elif isinstance(arg, datetime):
            return Arrow.fromdatetime(arg)

        # (tzinfo) -> now, @ tzinfo.
        elif isinstance(arg, tzinfo):
            return Arrow.now(arg)

        # (str) -> now, @ tzinfo.
        elif isinstance(arg, str):
            _tzinfo = parser.TzinfoParser.parse(arg)
            return Arrow.now(_tzinfo)

        else:
            raise TypeError('Can\'t parse single argument type of \'{0}\''.format(type(arg)))

    elif arg_count == 2:

        arg_1, arg_2 = args[0], args[1]

        if isinstance(arg_1, datetime):

            # (datetime, tzinfo) -> fromdatetime @ tzinfo.
            if isinstance(arg_2, tzinfo):
                return Arrow.fromdatetime(arg_1, arg_2)

            # (datetime, str) -> fromdatetime @ tzinfo.
            elif isinstance(arg_2, str):
                _tzinfo = parser.TzinfoParser.parse(arg_2)
                return Arrow.fromdatetime(arg_1, _tzinfo)

            else:
                raise TypeError('Can\'t parse two arguments of types \'datetime\', \'{0}\''.format(
                    type(arg_2)))

        # (str, format) -> parsed.
        elif isinstance(arg_1, str) and isinstance(arg_2, str):
            dt = parser.DateTimeParser.parse(args[0], args[1])
            return Arrow.fromdatetime(dt)

        else:
            raise TypeError('Can\'t parse two arguments of types \'{0}\', \'{1}\''.format(
                type(arg_1), type(arg_2)))

    # 3+ args.
    else:
        return Arrow(*args, **kwargs)

def start_timestamp(self):
    days = self.days
    hours = -1 * (24 * days)
    return Arrow.utcnow().shift(hours=hours).timestamp

def test_generate_backtest_stats(default_conf, testdatadir, tmpdir):
    default_conf.update({'strategy': CURRENT_TEST_STRATEGY})
    StrategyResolver.load_strategy(default_conf)

    results = {'DefStrat': {
        'results': pd.DataFrame({"pair": ["UNITTEST/BTC", "UNITTEST/BTC",
                                          "UNITTEST/BTC", "UNITTEST/BTC"],
                                 "profit_ratio": [0.003312, 0.010801, 0.013803, 0.002780],
                                 "profit_abs": [0.000003, 0.000011, 0.000014, 0.000003],
                                 "open_date": [Arrow(2017, 11, 14, 19, 32, 00).datetime,
                                               Arrow(2017, 11, 14, 21, 36, 00).datetime,
                                               Arrow(2017, 11, 14, 22, 12, 00).datetime,
                                               Arrow(2017, 11, 14, 22, 44, 00).datetime],
                                 "close_date": [Arrow(2017, 11, 14, 21, 35, 00).datetime,
                                                Arrow(2017, 11, 14, 22, 10, 00).datetime,
                                                Arrow(2017, 11, 14, 22, 43, 00).datetime,
                                                Arrow(2017, 11, 14, 22, 58, 00).datetime],
                                 "open_rate": [0.002543, 0.003003, 0.003089, 0.003214],
                                 "close_rate": [0.002546, 0.003014, 0.003103, 0.003217],
                                 "trade_duration": [123, 34, 31, 14],
                                 "is_open": [False, False, False, True],
                                 "is_short": [False, False, False, False],
                                 "stake_amount": [0.01, 0.01, 0.01, 0.01],
                                 "exit_reason": [ExitType.ROI, ExitType.STOP_LOSS,
                                                 ExitType.ROI, ExitType.FORCE_EXIT]
                                 }),
        'config': default_conf,
        'locks': [],
        'final_balance': 1000.02,
        'rejected_signals': 20,
        'timedout_entry_orders': 0,
        'timedout_exit_orders': 0,
        'backtest_start_time': Arrow.utcnow().int_timestamp,
        'backtest_end_time': Arrow.utcnow().int_timestamp,
        'run_id': '123',
        }
    }
    timerange = TimeRange.parse_timerange('1510688220-1510700340')
    min_date = Arrow.fromtimestamp(1510688220)
    max_date = Arrow.fromtimestamp(1510700340)
    btdata = history.load_data(testdatadir, '1m', ['UNITTEST/BTC'],
                               timerange=timerange, fill_up_missing=True)

    stats = generate_backtest_stats(btdata, results, min_date, max_date)
    assert isinstance(stats, dict)
    assert 'strategy' in stats
    assert 'DefStrat' in stats['strategy']
    assert 'strategy_comparison' in stats
    strat_stats = stats['strategy']['DefStrat']

    assert strat_stats['backtest_start'] == min_date.strftime(DATETIME_PRINT_FORMAT)
    assert strat_stats['backtest_end'] == max_date.strftime(DATETIME_PRINT_FORMAT)
    assert strat_stats['total_trades'] == len(results['DefStrat']['results'])
    # Above sample had no losing trade
    assert strat_stats['max_drawdown_account'] == 0.0

    # Retry with losing trade
    results = {'DefStrat': {
        'results': pd.DataFrame(
            {"pair": ["UNITTEST/BTC", "UNITTEST/BTC", "UNITTEST/BTC", "UNITTEST/BTC"],
             "profit_ratio": [0.003312, 0.010801, -0.013803, 0.002780],
             "profit_abs": [0.000003, 0.000011, -0.000014, 0.000003],
             "open_date": [Arrow(2017, 11, 14, 19, 32, 00).datetime,
                           Arrow(2017, 11, 14, 21, 36, 00).datetime,
                           Arrow(2017, 11, 14, 22, 12, 00).datetime,
                           Arrow(2017, 11, 14, 22, 44, 00).datetime],
             "close_date": [Arrow(2017, 11, 14, 21, 35, 00).datetime,
                            Arrow(2017, 11, 14, 22, 10, 00).datetime,
                            Arrow(2017, 11, 14, 22, 43, 00).datetime,
                            Arrow(2017, 11, 14, 22, 58, 00).datetime],
             "open_rate": [0.002543, 0.003003, 0.003089, 0.003214],
             "close_rate": [0.002546, 0.003014, 0.0032903, 0.003217],
             "trade_duration": [123, 34, 31, 14],
             "is_open": [False, False, False, True],
             "is_short": [False, False, False, False],
             "stake_amount": [0.01, 0.01, 0.01, 0.01],
             "exit_reason": [ExitType.ROI, ExitType.ROI,
                             ExitType.STOP_LOSS, ExitType.FORCE_EXIT]
             }),
        'config': default_conf,
        'locks': [],
        'final_balance': 1000.02,
        'rejected_signals': 20,
        'timedout_entry_orders': 0,
        'timedout_exit_orders': 0,
        'backtest_start_time': Arrow.utcnow().int_timestamp,
        'backtest_end_time': Arrow.utcnow().int_timestamp,
        'run_id': '124',
        }
    }

    stats = generate_backtest_stats(btdata, results, min_date, max_date)
    assert isinstance(stats, dict)
    assert 'strategy' in stats
    assert 'DefStrat' in stats['strategy']
    assert 'strategy_comparison' in stats
    strat_stats = stats['strategy']['DefStrat']

    assert pytest.approx(strat_stats['max_drawdown_account']) == 1.399999e-08
    assert strat_stats['drawdown_start'] == '2017-11-14 22:10:00'
    assert strat_stats['drawdown_end'] == '2017-11-14 22:43:00'
    assert strat_stats['drawdown_end_ts'] == 1510699380000
    assert strat_stats['drawdown_start_ts'] == 1510697400000
    assert strat_stats['pairlist'] == ['UNITTEST/BTC']

    # Test storing stats
    filename = Path(tmpdir / 'btresult.json')
    filename_last = Path(tmpdir / LAST_BT_RESULT_FN)
    _backup_file(filename_last, copy_file=True)
    assert not filename.is_file()

    store_backtest_stats(filename, stats)

    # get real Filename (it's btresult-<date>.json)
    last_fn = get_latest_backtest_filename(filename_last.parent)
    assert re.match(r"btresult-.*\.json", last_fn)

    filename1 = Path(tmpdir / last_fn)
    assert filename1.is_file()
    content = filename1.read_text()
    assert 'max_drawdown_account' in content
    assert 'strategy' in content
    assert 'pairlist' in content

    assert filename_last.is_file()

    _clean_test_file(filename_last)
    filename1.unlink()

def nation_query(self, variable='A020101', start_year='1949', rowcode='zb', colcode='sj'):
    wds = StatsGov.to_url_str([{'wdcode': rowcode, 'valuecode': variable}])
    dfwds = StatsGov.to_url_str([{'wdcode': colcode, 'valuecode': ''.join([start_year, '-'])}])
    retrive_url = self._stats_gov_url_template.format('QueryData', 'hgnd', rowcode, colcode, wds, dfwds)
    r = requests.get(retrive_url)
    return StatsGov.json_to_data(
        r.json(),
        condition={'zb': variable,
                   'sj': range(int(start_year), Arrow.utcnow().year + 1)})

def test_generate_backtest_stats(default_conf, testdatadir):
    default_conf.update({'strategy': 'DefaultStrategy'})
    StrategyResolver.load_strategy(default_conf)

    results = {'DefStrat': {
        'results': pd.DataFrame({"pair": ["UNITTEST/BTC", "UNITTEST/BTC",
                                          "UNITTEST/BTC", "UNITTEST/BTC"],
                                 "profit_ratio": [0.003312, 0.010801, 0.013803, 0.002780],
                                 "profit_abs": [0.000003, 0.000011, 0.000014, 0.000003],
                                 "open_date": [Arrow(2017, 11, 14, 19, 32, 00).datetime,
                                               Arrow(2017, 11, 14, 21, 36, 00).datetime,
                                               Arrow(2017, 11, 14, 22, 12, 00).datetime,
                                               Arrow(2017, 11, 14, 22, 44, 00).datetime],
                                 "close_date": [Arrow(2017, 11, 14, 21, 35, 00).datetime,
                                                Arrow(2017, 11, 14, 22, 10, 00).datetime,
                                                Arrow(2017, 11, 14, 22, 43, 00).datetime,
                                                Arrow(2017, 11, 14, 22, 58, 00).datetime],
                                 "open_rate": [0.002543, 0.003003, 0.003089, 0.003214],
                                 "close_rate": [0.002546, 0.003014, 0.003103, 0.003217],
                                 "trade_duration": [123, 34, 31, 14],
                                 "is_open": [False, False, False, True],
                                 "sell_reason": [SellType.ROI, SellType.STOP_LOSS,
                                                 SellType.ROI, SellType.FORCE_SELL]
                                 }),
        'config': default_conf,
        'locks': [],
        'backtest_start_time': Arrow.utcnow().int_timestamp,
        'backtest_end_time': Arrow.utcnow().int_timestamp,
        }
    }
    timerange = TimeRange.parse_timerange('1510688220-1510700340')
    min_date = Arrow.fromtimestamp(1510688220)
    max_date = Arrow.fromtimestamp(1510700340)
    btdata = history.load_data(testdatadir, '1m', ['UNITTEST/BTC'],
                               timerange=timerange, fill_up_missing=True)

    stats = generate_backtest_stats(btdata, results, min_date, max_date)
    assert isinstance(stats, dict)
    assert 'strategy' in stats
    assert 'DefStrat' in stats['strategy']
    assert 'strategy_comparison' in stats
    strat_stats = stats['strategy']['DefStrat']

    assert strat_stats['backtest_start'] == min_date.datetime
    assert strat_stats['backtest_end'] == max_date.datetime
    assert strat_stats['total_trades'] == len(results['DefStrat']['results'])
    # Above sample had no losing trade
    assert strat_stats['max_drawdown'] == 0.0

    results = {'DefStrat': {
        'results': pd.DataFrame(
            {"pair": ["UNITTEST/BTC", "UNITTEST/BTC", "UNITTEST/BTC", "UNITTEST/BTC"],
             "profit_ratio": [0.003312, 0.010801, -0.013803, 0.002780],
             "profit_abs": [0.000003, 0.000011, -0.000014, 0.000003],
             "open_date": [Arrow(2017, 11, 14, 19, 32, 00).datetime,
                           Arrow(2017, 11, 14, 21, 36, 00).datetime,
                           Arrow(2017, 11, 14, 22, 12, 00).datetime,
                           Arrow(2017, 11, 14, 22, 44, 00).datetime],
             "close_date": [Arrow(2017, 11, 14, 21, 35, 00).datetime,
                            Arrow(2017, 11, 14, 22, 10, 00).datetime,
                            Arrow(2017, 11, 14, 22, 43, 00).datetime,
                            Arrow(2017, 11, 14, 22, 58, 00).datetime],
             "open_rate": [0.002543, 0.003003, 0.003089, 0.003214],
             "close_rate": [0.002546, 0.003014, 0.0032903, 0.003217],
             "trade_duration": [123, 34, 31, 14],
             "open_at_end": [False, False, False, True],
             "sell_reason": [SellType.ROI, SellType.STOP_LOSS,
                             SellType.ROI, SellType.FORCE_SELL]
             }),
        'config': default_conf,
        }
    }

    assert strat_stats['max_drawdown'] == 0.0
    assert strat_stats['drawdown_start'] == datetime(1970, 1, 1, tzinfo=timezone.utc)
    assert strat_stats['drawdown_end'] == datetime(1970, 1, 1, tzinfo=timezone.utc)
    assert strat_stats['drawdown_end_ts'] == 0
    assert strat_stats['drawdown_start_ts'] == 0
    assert strat_stats['pairlist'] == ['UNITTEST/BTC']

    # Test storing stats
    filename = Path(testdatadir / 'btresult.json')
    filename_last = Path(testdatadir / LAST_BT_RESULT_FN)
    _backup_file(filename_last, copy_file=True)
    assert not filename.is_file()

    store_backtest_stats(filename, stats)

    # get real Filename (it's btresult-<date>.json)
    last_fn = get_latest_backtest_filename(filename_last.parent)
    assert re.match(r"btresult-.*\.json", last_fn)

    filename1 = (testdatadir / last_fn)
    assert filename1.is_file()
    content = filename1.read_text()
    assert 'max_drawdown' in content
    assert 'strategy' in content
    assert 'pairlist' in content

    assert filename_last.is_file()

    _clean_test_file(filename_last)
    filename1.unlink()
