def curation_stats(self):
    """Summarize curation rewards earned over the trailing 24 hours and 7 days.

    Scans the account's ``curation_reward`` history and converts the
    VESTS totals to SP via ``self.converter``.

    :return: dict with keys ``"24hr"``, ``"7d"`` (SP totals) and
        ``"avg"`` (7-day total divided by 7).
    """
    now = time.time()
    cutoff_24h = now - datetime.timedelta(hours=24).total_seconds()
    cutoff_7d = now - datetime.timedelta(days=7).total_seconds()

    vests_24h = 0.0
    vests_7d = 0.0
    for entry in self.history2(filter_by="curation_reward", take=10000):
        ts = parse_time(entry['timestamp']).timestamp()
        # The two windows are checked independently: a reward inside the
        # 24h window counts toward both totals.
        if ts > cutoff_7d:
            vests_7d += Amount(entry['reward']).amount
        if ts > cutoff_24h:
            vests_24h += Amount(entry['reward']).amount

    sp_7d = self.converter.vests_to_sp(vests_7d)
    sp_24h = self.converter.vests_to_sp(vests_24h)
    return {
        "24hr": sp_24h,
        "7d": sp_7d,
        "avg": sp_7d / 7,
    }
def stream(self, filter_by=None, *args, **kwargs):
    """ Yield a stream of blocks

    :param list filter_by: List of operations to filter for, e.g.
        vote, comment, transfer, transfer_to_vesting,
        withdraw_vesting, limit_order_create, limit_order_cancel,
        feed_publish, convert, account_create, account_update,
        witness_update, account_witness_vote, account_witness_proxy,
        pow, custom, report_over_production, fill_convert_request,
        comment_reward, curate_reward, liquidity_reward, interest,
        fill_vesting_withdraw, fill_order,
        A single operation name (str) is also accepted.
    """
    # FIX: the previous signature used a mutable default (filter_by=list());
    # use None as the sentinel to avoid sharing state across calls.
    # Backward compatible: an omitted/empty filter still matches all ops.
    if filter_by is None:
        filter_by = []
    if isinstance(filter_by, str):
        filter_by = [filter_by]
    for ops in self.ops(*args, **kwargs):
        # deal with full_blocks optionality: self.ops may yield a single
        # event dict or a list of events; normalize to a list.
        events = [ops] if isinstance(ops, dict) else ops
        for event in events:
            op_type, op = event['op']
            if not filter_by or op_type in filter_by:
                yield {
                    "_id": self.hash_op(event),
                    **op,
                    "type": op_type,
                    "timestamp": parse_time(event.get("timestamp")),
                    "block_num": event.get("block"),
                    "trx_id": event.get("trx_id"),
                }
def get_block_from_time(self, timestring, error_margin=10, mode="last_irreversible_block_num"):
    """ Estimate block number from given time

    :param str timestring: String representing time
    :param int error_margin: Estimate block number within this interval
        (in seconds)
    :param str mode: (default)Irreversible block
        (``last_irreversible_block_num``) or actual head block
        (``head_block_number``)
    """
    target = parse_time(timestring).timestamp()
    guess = self.get_current_block(mode)
    # Initial jump assumes one block every 3 seconds.
    guess -= (self.block_timestamp(guess) - target) / 3
    drift = target - self.block_timestamp(guess)
    # Refine iteratively until within the acceptable error margin.
    while abs(drift) > error_margin:
        guess += drift / 3
        drift = target - self.block_timestamp(guess)
    return int(guess)
def block_timestamp(self, block_num):
    """ Returns the timestamp of the block with the given block number
    as an integer UNIX timestamp.

    :param int block_num: Block number
    """
    raw = self.steem.rpc.get_block(block_num)['timestamp']
    return int(parse_time(raw).timestamp())
def filter_by_date(items, start_time, end_time=None):
    """ Return the items whose timestamp falls strictly between
    ``start_time`` and ``end_time``.

    Each item is expected to carry its timestamp under a ``'time'`` or
    ``'timestamp'`` key; items with neither key are skipped.

    :param items: iterable of dict-like records
    :param str start_time: lower bound (exclusive), parseable by parse_time
    :param str end_time: upper bound (exclusive); defaults to now
    """
    start_time = parse_time(start_time).timestamp()
    if end_time:
        end_time = parse_time(end_time).timestamp()
    else:
        end_time = time.time()

    filtered_items = []
    for item in items:
        if 'time' in item:
            item_time = item['time']
        elif 'timestamp' in item:
            item_time = item['timestamp']
        else:
            # FIX: an item with neither key previously raised NameError
            # (first item) or silently reused the previous item's
            # timestamp (stale local); skip such items instead.
            continue
        timestamp = parse_time(item_time).timestamp()
        if end_time > timestamp > start_time:
            filtered_items.append(item)
    return filtered_items
def steemd_health():
    """Report steemd head-block freshness.

    :return: dict with the head block time, its age in seconds, and a
        status of ``'ok'`` (age < 100s) or ``'impaired'``.
    """
    client = Steem()
    header = client.get_block_header(client.head_block_number)
    head_time = parse_time(header['timestamp'])
    age = (dt.datetime.utcnow() - head_time).seconds
    status = 'ok' if age < 100 else 'impaired'
    return dict(
        steemd_head_time=str(head_time),
        diff=age,
        status=status,
    )
def typify(value):
    """ typify takes a blockchain operation or dict/list/value,
    and then it parses and converts string types into
    native data types where appropriate.

    - asset strings such as ``"1.000 STEEM"`` become dicts with
      ``amount`` and ``asset`` keys
    - ISO-8601 timestamps are parsed via ``parse_time``
    - dicts, lists and sets are walked recursively
    - anything else is returned unchanged
    """
    # Exact type checks are kept deliberately (not isinstance):
    # subclasses such as OrderedDict may carry semantics the generic
    # walkers would lose.
    if type(value) == dict:
        return walk_values(typify, value)
    if type(value) in [list, set]:
        return list(map(typify, value))
    if type(value) == str:
        # FIX: raw strings — '\d' is an invalid escape sequence in a
        # plain string literal (DeprecationWarning, future SyntaxError).
        if re.match(r'^\d+\.\d+ (STEEM|SBD|VESTS)$', value):
            return keep_in_dict(Amount(value).__dict__, ['amount', 'asset'])
        if re.match(r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}$', value):
            return parse_time(value)
    return value
def get_block_from_time(self, timestring, error_margin=10):
    """ Estimate block number from given time

    :param str timestring: String representing time
    :param int error_margin: Estimate block number within this interval
        (in seconds)
    """
    target = parse_time(timestring).timestamp()
    guess = self.get_current_block()['block_num']
    # Initial jump assumes one block every 3 seconds.
    guess -= (self.block_timestamp(guess) - target) / 3
    drift = target - self.block_timestamp(guess)
    # Refine iteratively until within the acceptable error margin.
    while abs(drift) > error_margin:
        guess += drift / 3
        drift = target - self.block_timestamp(guess)
    return int(guess)
def generate_cached_post_sql(id, post, updated_at):
    """Build the upsert SQL and bind parameters for the
    ``hive_posts_cache`` row describing ``post``.

    :param int id: hive post id (note: parameter name shadows the
        builtin ``id``; kept for interface compatibility)
    :param dict post: post object as returned by steemd
    :param updated_at: timestamp recorded for this refresh
    :return: tuple of (sql string, OrderedDict of bind parameters)
    """
    # parse json_metadata; fall back to None/{} on garbage
    md = None
    try:
        md = json.loads(post['json_metadata'])
        if type(md) is not dict:
            md = {}
    except json.decoder.JSONDecodeError:
        pass

    thumb_url = ''
    if md and 'image' in md:
        thumb_url = get_img_url(first(md['image'])) or ''
        md['image'] = [thumb_url]

    # clean up tags, check if nsfw
    tags = [post['category']]
    if md and 'tags' in md and type(md['tags']) == list:
        tags = tags + md['tags']
    # FIX: the lambda parameter previously shadowed the builtin ``str``
    tags = set(map(lambda tag: (tag or '').lower(), tags))
    is_nsfw = int('nsfw' in tags)

    # payout date is last_payout if paid, and cashout_time if pending.
    payout_at = post['last_payout'] if post['cashout_time'][
        0:4] == '1969' else post['cashout_time']

    # get total rshares, and create comma-separated vote data blob
    rshares = sum(int(v['rshares']) for v in post['active_votes'])
    csvotes = "\n".join(map(vote_csv_row, post['active_votes']))

    # these are rshares which are PENDING
    # NOTE(review): payout_declined is computed but never persisted or
    # returned — presumably intended for a future column (see TODO below).
    payout_declined = False
    if Amount(post['max_accepted_payout']).amount == 0:
        payout_declined = True
    elif len(post['beneficiaries']) == 1:
        benny = first(post['beneficiaries'])
        if benny['account'] == 'null' and int(benny['weight']) == 10000:
            payout_declined = True

    # total payout (completed and/or pending)
    payout = sum([
        Amount(post['total_payout_value']).amount,
        Amount(post['curator_payout_value']).amount,
        Amount(post['pending_payout_value']).amount,
    ])

    # total promotion cost
    promoted = Amount(post['promoted']).amount

    # trending scores
    timestamp = parse_time(post['created']).timestamp()
    hot_score = score(rshares, timestamp, 10000)
    trend_score = score(rshares, timestamp, 480000)

    # TODO: evaluate adding these columns. Some CAN be computed upon access.
    # Some need to be in the db if queries will depend on them.
    # (is_hidden)
    # is_no_payout
    # is_full_power
    # is_hidden
    # is_grayed
    # flag_weight
    # total_votes
    # up_votes

    values = collections.OrderedDict([
        ('post_id', '%d' % id),
        ('title', "%s" % escape(post['title'])),
        ('preview', "%s" % escape(post['body'][0:1024])),
        ('img_url', "%s" % escape(thumb_url)),
        ('payout', "%f" % payout),
        ('promoted', "%f" % promoted),
        ('payout_at', "%s" % payout_at),
        ('updated_at', "%s" % updated_at),
        ('created_at', "%s" % post['created']),
        ('children', "%d" % post['children']),  # TODO: remove this field
        ('rshares', "%d" % rshares),
        ('votes', "%s" % escape(csvotes)),
        ('json', "%s" % escape(json.dumps(md))),
        ('is_nsfw', "%d" % is_nsfw),
        ('sc_trend', "%f" % trend_score),
        ('sc_hot', "%f" % hot_score)
    ])
    fields = values.keys()

    # build the upsert: named bind params, every column but post_id updated
    cols = ', '.join(fields)
    params = ', '.join([':' + k for k in fields])
    update = ', '.join([k + " = :" + k for k in fields][1:])
    sql = "INSERT INTO hive_posts_cache (%s) VALUES (%s) ON DUPLICATE KEY UPDATE %s"
    return (sql % (cols, params, update), values)
def head_block_lag(steemd_instance) -> int:
    """ Return age of head block (in seconds)."""
    header = steemd_instance.get_block_header(steemd_instance.head_block_number)
    produced_at = parse_time(header['timestamp'])
    return (dt.utcnow() - produced_at).seconds
def read_time(time_string):
    """Parse ``time_string`` and return it as an integer UNIX timestamp."""
    parsed = parse_time(time_string)
    return int(parsed.timestamp())
def report(): with open('reportlayout.txt') as layout: report_layout = layout.read() steem = Steem(node='wss://gtg.steem.house:8090') main_report = ('|' + column_title_thumbnail + ' | ' + column_title_author + ' | ' + column_title_post_title + ' | ' + column_title_payout + ' |\n' + '| --- | --- | --- | --- |\n') total_rewards = 0 total_posts = 0 trailing_24h_t = time.time() - datetime.timedelta( minutes=1440).total_seconds() # 24 * 60 = 1440 stoptime = time.time() starttime = trailing_24h_t author_list = [] for i in account.history_reverse(filter_by="vote", batch_size=10000): timestamp = parse_time(i['timestamp']).timestamp() if timestamp > trailing_24h_t: if i['voter'] == report_author: link = str('@' + i["author"] + "/" + i["permlink"]) full_link = ('https://steemit.com/tag/' + link) post = Post(link) reward = (Amount(post.get('total_payout_value')) + Amount(post.get('pending_payout_value'))) total_posts = total_posts + 1 tags = (post['json_metadata'].get('tags', [])) category = post.category if post.is_main_post() and not i['author'] == report_author: if scan_tag in tags or scan_tag in category: if total_rewards == 0: total_rewards = reward else: total_rewards = total_rewards + reward try: imagelink = post['json_metadata'].get('image')[0] except: imagelink = 'https://steem.io/images/steem.png' if len( imagelink ) > 100: # prevents problems with some image-links imagelink = 'https://steem.io/images/steem.png' image = ( '![main_image](https://img1.steemit.com/128x256/' + imagelink + ')') post_title = (post['title']) if len( post_title ) > 30: # cut off long titles, so the images scale properly post_title = post_title[:30] + " ..." 
if '|' in post_title: # these symbols mess with the markdown layout post_title = post_title.replace('|', ';') if '[' in post_title: post_title = post_title.replace('[', '') if ']' in post_title: post_title = post_title.replace(']', '') main_report = (main_report + image + '|@' + i['author'] + '|[' + post_title + '](' + full_link + ')|' + str(reward) + '\n') if not i['author'] in author_list: author_list.append(i['author']) date = time.strftime(date_format, time.localtime(stoptime)) report_starttime = time.strftime(time_format, time.localtime(starttime)) report_stoptime = time.strftime(time_format, time.localtime(stoptime)) a = Amount(total_rewards) average_rewards = a * (1 / total_posts) total_authors = len(author_list) dated_report_title = (report_title + str(date)) beneficiaries = author_list beneficiaries.append(report_author) bene_list = [] bene_weight = 10000 // len(beneficiaries) bene_rest = 10000 - (bene_weight * len(beneficiaries)) for author in beneficiaries: bene_dict = OrderedDict() bene_dict['account'] = author bene_dict['weight'] = bene_weight if author == report_author: bene_dict['weight'] = bene_weight + bene_rest bene_list.append(bene_dict) report = report_layout report = report.replace('STARTTIME_GOES_HERE', str(report_starttime)) report = report.replace('STOPTIME_GOES_HERE', str(report_stoptime)) report = report.replace('REPORT_AUTHOR_GOES_HERE', str(report_author)) report = report.replace('TOTAL_POSTS_GOES_HERE', str(total_posts)) report = report.replace('TOTAL_AUTHORS_GOES_HERE', str(len(author_list))) report = report.replace('TOTAL_REWARDS_GOES_HERE', str(total_rewards)) report = report.replace('AVERAGE_REWARDS_GOES_HERE', str(average_rewards)) report = report.replace('TOTAL_BENEFICIARIES_GOES_HERE', str(len(beneficiaries))) report = report.replace('REPORT_GOES_HERE', str(main_report)) steem = Steem(keys=report_author_key, node='wss://gtg.steem.house:8090' ) # instanciate again, for good measure # uncomment this to post automatically """ 
steem.post(
def process_block(block):
    """Ingest one steemd block into the hive database.

    Records the block in ``hive_blocks``, collects accounts created in
    the block (pow/pow2/account_create*), registers new posts, marks
    deleted posts, and dispatches recognized ``custom_json`` ops
    ('follow' and 'com.steemit.community') to their handlers.

    :param dict block: full block as returned by the steemd API
    """
    date = parse_time(block['timestamp'])
    # block number is derived from the previous block id's 8-hex-digit prefix
    block_num = int(block['previous'][:8], base=16) + 1
    txs = block['transactions']

    # NOTE: currently `prev` tracks the previous block number and this is enforced with a FK constraint.
    # soon we will have access to prev block hash and current hash in the API return value, we should use this instead.
    # the FK constraint will then fail if we somehow end up on the wrong side in a fork reorg.
    query("INSERT INTO hive_blocks (num, prev, txs, created_at) "
          "VALUES ('%d', '%d', '%d', '%s')" % (block_num, block_num - 1, len(txs), date))

    # buckets for the operations we act on
    accounts = set()
    comments = []
    json_ops = []
    deleted = []
    for tx in txs:
        for operation in tx['operations']:
            op_type, op = operation

            if op_type == 'pow':
                accounts.add(op['worker_account'])
            elif op_type == 'pow2':
                accounts.add(op['work'][1]['input']['worker_account'])
            elif op_type in [
                    'account_create', 'account_create_with_delegation'
            ]:
                accounts.add(op['new_account_name'])
            elif op_type == 'comment':
                comments.append(op)
            elif op_type == 'delete_comment':
                deleted.append(op)
            elif op_type == 'custom_json':
                json_ops.append(op)

    register_accounts(
        accounts,
        date)  # if an account does not exist, mark it as created in this block
    register_posts(
        comments, date
    )  # if this is a new post, add the entry and validate community param
    delete_posts(deleted)  # mark hive_posts.is_deleted = 1

    for op in map(json_expand, json_ops):
        if op['id'] not in ['follow', 'com.steemit.community']:
            continue

        # we are assuming `required_posting_auths` is always used and length 1.
        # it may be that some ops will require `required_active_auths` instead
        # (e.g. if we use that route for admin action of acct creation)
        # if op['required_active_auths']:
        #     log.warning("unexpected active auths: %s" % op)
        if len(op['required_posting_auths']) != 1:
            log.warning("unexpected auths: %s" % op)
            continue

        account = op['required_posting_auths'][0]
        op_json = op['json']

        if op['id'] == 'follow':
            # NOTE(review): early blocks carried bare follow payloads;
            # wrap them in the ['follow', ...] envelope for compatibility
            if block_num < 6000000 and type(op_json) != list:
                op_json = ['follow', op_json]  # legacy compat
            process_json_follow_op(account, op_json, date)
        elif op['id'] == 'com.steemit.community':
            process_json_community_op(account, op_json, date)