def get_inbound_steem_transfers(account_name):
    """Return all transfer operations received by *account_name* that are
    worth more than 0.1 (of the transfer's currency).

    :param account_name: name of the account whose inbound transfers to list.
    :returns: list of transfer-operation dicts from the account's history.
    """
    nodes = ['https://api.steemit.com', 'https://gtg.steem.house:8090']
    set_shared_steemd_instance(Steemd(nodes=nodes))
    # BUG FIX: previously queried the hard-coded account 'greenman', ignoring
    # the account_name parameter entirely.
    acc = Account(account_name)

    # Renamed from `filter` to avoid shadowing the builtin.
    def _is_inbound(t):
        # amount is a string like "1.234 STEEM"; compare the numeric part.
        return (t['to'] == account_name
                and float(t['amount'].split(' ')[0]) > 0.1)

    return [t for t in acc.history(filter_by=['transfer']) if _is_inbound(t)]
def test_history():
    """history() and history_reverse() must cover exactly the same operations,
    in mirrored order, matching the account's virtual operation count."""
    account = Account('barbara2')
    forward = [op['index'] for op in account.history()]
    backward = [op['index'] for op in account.history_reverse()]
    expected = list(range(account.virtual_op_count() + 1))
    # pprint(list(zip(forward, backward[::-1])))
    # Same number of ops, same op set, and exact mirror ordering.
    assert len(forward) == len(backward)
    assert set(forward) == set(backward) == set(expected)
    assert forward == backward[::-1] == expected
def test_history():
    """Disabled round-trip test of history() vs history_reverse()."""
    # TODO 1: test is disabled because api.steemit.com account history
    #         pruning is temporarily in place, breaking assumptions.
    # TODO 2: in addition, the current pruning implementation fails to remove
    #         the very first operation, revealing a bug in history_reverse()
    #         which causes it to be included once on every page, causing an
    #         item count mismatch.
    return

    # Unreachable until the TODOs above are resolved.
    account = Account('barbara2')
    forward = [op['index'] for op in account.history()]
    backward = [op['index'] for op in account.history_reverse()]
    expected = list(range(account.virtual_op_count() + 1))
    # pprint(list(zip(forward, backward[::-1])))
    assert len(forward) == len(backward)
    assert set(forward) == set(backward) == set(expected)
    assert forward == backward[::-1] == expected
def get_new_comments(U):
    """For each tracked user in *U*, fetch comment operations newer than the
    user's last seen index and post every comment written by someone else to
    the user's Slack channel.

    :param U: iterable of user dicts with mutable 'id' and 'last_index' keys;
              'last_index' is advanced in place as comments are consumed.
    """
    for user in U:
        print("GET NEW COMMENT : %s" % user['id'])
        account = Account(user['id'])
        # Resume from one past the last index we have already handled.
        for comment in account.history(start=user['last_index'] + 1,
                                       filter_by="comment"):
            print("NEW COMMENT FROM %s -> %s" % (comment['author'], user['id']))
            user['last_index'] = comment['index']
            # Skip the user's own comments; only forward replies by others.
            if comment['author'] == user['id']:
                continue
            url = "https://steemit.com/@%s/%s" % (
                comment['parent_author'], comment['parent_permlink'])
            attachment = {
                'title': "%s -> %s" % (comment['author'], user['id']),
                'title_link': url,
                'text': comment['body'],
                'mrkdwn_in': ["text"],
            }
            slack.chat.post_message(channel="#%s" % user['id'],
                                    text=None,
                                    attachments=[attachment])
def run():
    """Timely-post main loop.

    First sweeps the account's post history for unprocessed top-level posts,
    then blocks forever streaming the blockchain, queuing new top-level posts
    by the configured account and re-attempting processing of all pending
    candidates after each streamed post.

    Relies on module-level ``log``, ``cred`` (credentials/config),
    ``process`` and the steem client classes.
    """
    log.info("Timely post mode activated")
    log.debug("initializing...")
    steem = Steem(keys=[cred.key])
    account = Account(cred.id, steem)
    chain = Blockchain(steem)
    commit = Commit(steem)
    log.debug("ready", steem=steem, account=account, blockchain=chain, commit=commit)

    # Because subsequent edits to a post show up as separate post entries in the blockchain,
    # we'll keep a list of candidates keyed by the post identifier which the edits share.
    candidates = {}

    log.info("Checking post history...")
    history = map(Post, account.history(filter_by=['comment']))
    # FIXME: use steem.get_posts() instead?
    for post in history:
        if post.is_main_post():
            log.debug("found a top-level post", post=post, tags=post.tags)
            # Historical candidates: first tag is our own id and the post has
            # not already been marked 'boiled'.
            if post.tags[0] == cred.id and 'boiled' not in post.tags:
                candidates[post.identifier] = post

    if len(candidates) > 0:
        log.info("Found one or more historical posts to process", posts=candidates)
        # A truthy result or None from process() means we are done with the
        # post; deletion is deferred so the dict isn't mutated mid-iteration.
        deleting = []
        for key, post in candidates.items():
            result = process(commit, post)
            if result or result is None:
                deleting.append(key)
        for key in deleting:
            del candidates[key]

    log.info("Watching for new posts...")
    while True:
        stream = map(Post, chain.stream(filter_by=['comment']))
        try:
            for post in stream:
                if post.is_main_post() and post.author == cred.id:
                    log.debug("found a top-level post", post=post, tags=post.tags)
                    # Live candidates use a stricter tag rule than the
                    # historical sweep: exactly two tags, both our own id.
                    if len(post.tags) == 2 and post.tags[0] == cred.id and post.tags[1] == cred.id:
                        candidates[post.identifier] = post
                # Re-attempt every pending candidate after each streamed post.
                # NOTE(review): this inner loop rebinds `post`, shadowing the
                # streamed post for the rest of the iteration — harmless as
                # written, but worth confirming it is intentional.
                deleting = []
                for key, post in candidates.items():
                    result = process(commit, post)
                    if result or result is None:
                        deleting.append(key)
                for key in deleting:
                    del candidates[key]
        except PostDoesNotExist as e:
            # A streamed post can vanish (deleted/edited) before we read it.
            log.debug("Post has vanished", exception=e)
        except RPCError as e:
            # Node hiccup: log and fall through to re-open the stream.
            log.error("RPC problem while streaming posts", exception=e)