def test_utc_timestamp():
    """utc_timestamp converts parsed block times to epoch seconds."""
    # epoch boundary cases
    assert utc_timestamp(parse_time('1970-01-01T00:00:00')) == 0
    assert utc_timestamp(parse_time('1970-01-01T00:00:01')) == 1

    # a real block timestamp
    parsed = parse_time('2018-06-22T20:34:30')
    assert utc_timestamp(parsed) == 1529699670
def post_payout(post):
    """Get current vote/payout data and recalculate trend/hot score.

    :param post: post dict as returned by the node API
    :return: dict with `payout`, `rshares`, `csvotes`, `sc_trend`, `sc_hot`
    """
    # total payout (completed and/or pending)
    payout = sum([
        sbd_amount(post['total_payout_value']),
        sbd_amount(post['curator_payout_value']),
        sbd_amount(post['pending_payout_value']),
    ])

    # get total rshares, and create comma-separated vote data blob
    rshares = sum(int(v['rshares']) for v in post['active_votes'])
    csvotes = "\n".join(map(_vote_csv_row, post['active_votes']))

    # trending scores. parse_time returns a *naive* UTC datetime, and
    # datetime.timestamp() interprets naive values in the machine's LOCAL
    # timezone -- which skewed scores on non-UTC hosts. timegm() treats the
    # time tuple as UTC explicitly, matching utc_timestamp() used elsewhere.
    from calendar import timegm
    _timestamp = timegm(parse_time(post['created']).timetuple())
    sc_trend = score(rshares, _timestamp, 480000)
    sc_hot = score(rshares, _timestamp, 10000)

    return {'payout': payout,
            'rshares': rshares,
            'csvotes': csvotes,
            'sc_trend': sc_trend,
            'sc_hot': sc_hot}
def post_payout(post):
    """Get current vote/payout data and recalculate trend/hot score."""
    votes = post['active_votes']

    # total payout (completed and/or pending)
    payout_fields = ('total_payout_value',
                     'curator_payout_value',
                     'pending_payout_value')
    payout = sum(sbd_amount(post[field]) for field in payout_fields)

    # `active_votes` was temporarily missing in dev -- ensure this condition
    # is caught ASAP. if no active_votes then rshares MUST be 0. ref: steem#2568
    assert votes or int(post['net_rshares']) == 0

    # get total rshares, and create comma-separated vote data blob
    rshares = sum(int(vote['rshares']) for vote in votes)
    csvotes = "\n".join(_vote_csv_row(vote) for vote in votes)

    # trending scores
    _timestamp = utc_timestamp(parse_time(post['created']))
    sc_trend = _score(rshares, _timestamp, 480000)
    sc_hot = _score(rshares, _timestamp, 10000)

    return {'payout': payout,
            'rshares': rshares,
            'csvotes': csvotes,
            'sc_trend': sc_trend,
            'sc_hot': sc_hot}
def get_block_simple(self, block_num):
    """Fetch a block and reduce it to its essential fields.

    :param block_num: height of the block to fetch
    :return: dict with `num` (int height decoded from the block id),
             `date` (parsed datetime), and `hash` (block id)
    """
    block = self.get_block(block_num)
    # fail fast with a clear message rather than an opaque TypeError on
    # subscripting None; mirrors the sibling `_get_block_simple` helper.
    assert block, 'could not load block %d' % block_num
    return {'num': int(block['block_id'][:8], base=16),
            'date': parse_time(block['timestamp']),
            'hash': block['block_id']}
def post_payout(post):
    """Get current vote/payout/promotion data and recalc trend/hot score.

    :param post: post dict as returned by the node API
    :return: dict with `payout`, `promoted`, `rshares`, `csvotes`,
             `sc_trend`, `sc_hot`
    """
    # total payout (completed and/or pending)
    payout = sum([
        amount(post['total_payout_value']),
        amount(post['curator_payout_value']),
        amount(post['pending_payout_value']),
    ])

    # total promotion cost
    promoted = amount(post['promoted'])

    # get total rshares, and create comma-separated vote data blob
    rshares = sum(int(v['rshares']) for v in post['active_votes'])
    csvotes = "\n".join(map(_vote_csv_row, post['active_votes']))

    # trending scores. parse_time returns a *naive* UTC datetime, and
    # datetime.timestamp() interprets naive values in the machine's LOCAL
    # timezone -- which skewed scores on non-UTC hosts. timegm() treats
    # the time tuple as UTC explicitly.
    from calendar import timegm
    _timestamp = timegm(parse_time(post['created']).timetuple())
    sc_trend = score(rshares, _timestamp, 480000)
    sc_hot = score(rshares, _timestamp, 10000)

    return {'payout': payout,
            'promoted': promoted,
            'rshares': rshares,
            'csvotes': csvotes,
            'sc_trend': sc_trend,
            'sc_hot': sc_hot}
def _get_block_simple(self, block_num):
    """Fetch a block and reduce it to num/date/hash fields."""
    block = self.get_block(block_num)
    assert block, 'could not load block %d' % block_num

    block_id = block['block_id']
    return {'num': int(block_id[:8], base=16),
            'date': parse_time(block['timestamp']),
            'hash': block_id}
def test_parse_time():
    """parse_time turns a block timestamp string into a naive datetime."""
    expected = datetime(2018, 6, 22, 20, 34, 30)
    assert parse_time('2018-06-22T20:34:30') == expected
def test_head_time(client):
    """Head block time should be no more than 15 minutes stale."""
    cutoff = datetime.datetime.now() - datetime.timedelta(minutes=15)
    assert parse_time(client.head_time()) > cutoff
def test_head_time():
    """Head block time should be no more than 15 minutes stale."""
    client = SteemClient.instance()
    cutoff = datetime.datetime.now() - datetime.timedelta(minutes=15)
    assert parse_time(client.head_time()) > cutoff
def stream_blocks(self, start_from, trail_blocks=0, max_gap=40):
    """Stream blocks from `start_from`, trailing `trail_blocks` behind head.

    Generator: yields block dicts in order. Buffering `trail_blocks` blocks
    shields the consumer from shallow micro-forks. The generator ends
    (plain `return`) when the gap to head exceeds `max_gap` -- caller is
    expected to fall back to fast-sync -- or when a fork is detected while
    blocks are still buffered; a fork that reaches already-emitted state
    raises instead.

    NOTE(review): scheduling math assumes 3-second block intervals --
    confirm against the chain's block interval.

    :param start_from: first block number to stream
    :param trail_blocks: number of blocks to buffer before yielding (0..24)
    :param max_gap: bail out of live mode when this far behind head
    """
    assert trail_blocks >= 0
    assert trail_blocks < 25

    # state: `last` tracks the previously-linked block; `next_expected` is
    # the wall-clock time we expect the next block slot to open.
    last = self.get_block_simple(start_from - 1)
    head_num = self.head_block()
    next_expected = time.time()

    start_head = head_num
    lag_secs = 1     # grace period added to each slot before we poll
    queue = []       # trailing buffer of not-yet-yielded blocks
    while True:
        assert not last['num'] > head_num

        # if slots missed, advance head block
        time_now = time.time()
        while time_now >= next_expected + lag_secs:
            head_num += 1
            next_expected += 3

        # check we're not too far behind
        gap = head_num - last['num']
        print("[LIVE] %d blocks behind..." % gap)
        if gap > max_gap:
            print("[LIVE] gap too large: %d" % gap)
            return  # return to fast-sync

        # if caught up, await head advance.
        if head_num == last['num']:
            time.sleep(next_expected + lag_secs - time_now)
            head_num += 1
            next_expected += 3

        # get the target block; if DNE, pause and retry
        block_num = last['num'] + 1
        block = self.get_block(block_num)
        if not block:
            # block not out yet: back off by widening the slot grace period
            # (capped at 3s) and retry shortly.
            lag_secs = min(3, lag_secs + 0.1)  # tune inter-slot timing
            print("[LIVE] block %d not available. hive:%d steem:%d. lag:%f"
                  % (block_num, head_num, self.head_block(), lag_secs))
            time.sleep(0.5)
            continue
        lag_secs -= 0.001  # timing forward creep

        last['num'] = block_num

        # if block doesn't link, we're forked
        if last['hash'] != block['previous']:
            if queue:  # using trail_blocks, fork might not be in db
                # NOTE(review): despite the message, the queue is simply
                # dropped here (generator ends); caller restarts the stream.
                print("[FORK] Fork in queue; emptying to retry.")
                return
            raise Exception(
                "[FORK] Fork in db: from %s, %s->%s"
                % (last['hash'], block['previous'], block['block_id']))
        last['hash'] = block['block_id']

        # detect missed blocks, adjust schedule
        block_date = parse_time(block['timestamp'])
        # NOTE(review): timedelta.seconds (not total_seconds()) -- fine for
        # forward gaps under a day, but yields a huge value if block_date
        # were ever earlier than last['date']; confirm intended.
        miss_secs = (block_date - last['date']).seconds - 3
        if miss_secs and last['num'] >= start_head:
            print("[LIVE] %d missed blocks" % (miss_secs / 3))
            next_expected += miss_secs
            lag_secs = 1
        last['date'] = block_date

        # buffer block yield
        queue.append(block)
        if len(queue) > trail_blocks:
            yield queue.pop(0)