def test_post_payout(self, node_param):
    """Fetch ten payout-ordered discussions tagged 'steemit' and check
    that exactly ten are returned, on the node selected by *node_param*.
    """
    # Parametrized node selection: legacy vs. appbase backend.
    instance = self.bts if node_param == "non_appbase" else self.appbase
    query = Query(limit=10, tag="steemit")
    discussions = Post_discussions_by_payout(query, steem_instance=instance)
    self.assertEqual(len(discussions), 10)
def test_blog(self, node_param):
    """Fetch ten blog discussions for account 'gtg' and verify the count,
    on the node selected by *node_param*.
    """
    # Parametrized node selection: legacy vs. appbase backend.
    instance = self.bts if node_param == "non_appbase" else self.appbase
    query = Query(limit=10, tag="gtg")
    discussions = Discussions_by_blog(query, steem_instance=instance)
    self.assertEqual(len(discussions), 10)
def test_comments(self, node_param):
    """Fetch ten comments starting from author 'gtg' and verify the count,
    on the node selected by *node_param*.
    """
    # Parametrized node selection: legacy vs. appbase backend.
    instance = self.bts if node_param == "non_appbase" else self.appbase
    query = Query(limit=10, filter_tags=["gtg"], start_author="gtg")
    discussions = Discussions_by_comments(query, steem_instance=instance)
    self.assertEqual(len(discussions), 10)
def test_promoted(self):
    """Cross-check the dedicated Discussions_by_promoted class against the
    generic Discussions.get_discussions('promoted') generator: both must
    yield the same number of results for an identical query.
    """
    instance = self.bts
    query = Query(limit=2, tag="hive")
    direct = Discussions_by_promoted(query, steem_instance=instance)
    via_generator = list(
        Discussions(steem_instance=instance).get_discussions(
            "promoted", query, limit=2))
    self.assertEqual(len(direct), len(via_generator))
def main():
    """Iterates over the most recently created contributions and adds
    them to the spreadsheet if not already in there.
    """
    # Pull the 100 newest posts tagged 'utopian-io'.
    query = Query(limit=100, tag="utopian-io")
    # URLs already present in the spreadsheet (refreshed after each append).
    result = get_urls()
    # Account names of all registered moderators from the database.
    moderators = [
        moderator["account"]
        for moderator in constants.DB_UTEMPIAN.moderators.find()
    ]
    for post in Discussions_by_created(query):
        steemit_url = (
            f"{constants.STEEMIT_URL}{post.category}/{post.authorperm}")
        # Only process posts not yet recorded in the sheet.
        if steemit_url not in result:
            tags = post.json_metadata["tags"]
            # Checking if valid post: needs at least 2 tags and must have
            # been created within the current review week.
            if (len(tags) < 2 or
                    post["created"].date() < constants.THIS_WEEK):
                continue
            else:
                is_valid, category = valid_category(tags)
                if not is_valid:
                    continue
                elif (category == "translations" and
                        post.author not in constants.UTOPIAN_TRANSLATORS):
                    # Translations are restricted to an approved list.
                    constants.LOGGER.error(
                        f"{steemit_url} not made by accepted translator!")
                    continue
                elif (category == "iamutopian" and
                        post.author not in moderators):
                    # 'iamutopian' posts are moderator-only.
                    continue
            repository = get_repository(post)
            # If user banned, set moderator as BANNED and score to 0
            if (post.author, "Yes") not in constants.BANNED_USERS:
                row = ["", "", steemit_url, repository, category]
            else:
                # NOTE(review): naive local time — confirm sheet expects
                # local rather than UTC timestamps.
                today = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
                row = [
                    "BANNED", str(today), steemit_url, repository,
                    category, "0", "", "", "", "", 0
                ]
                constants.LOGGER.info(
                    f"Commenting on {steemit_url} - BANNED.")
                banned_comment(steemit_url)
            constants.UNREVIEWED.append_row(row)
            # Re-fetch so later iterations see the row just appended.
            result = get_urls()
            constants.LOGGER.info(
                f"{steemit_url} has tags: {tags} and was added.")
            store_contribution(post, category)
def test_promoted(self, node_param):
    """On the node selected by *node_param*, verify that both the dedicated
    Discussions_by_promoted class and the generic get_discussions generator
    each return exactly ten promoted posts for tag 'steemit'.
    """
    # Parametrized node selection: legacy vs. appbase backend.
    instance = self.bts if node_param == "non_appbase" else self.appbase
    query = Query(limit=10, tag="steemit")
    direct = Discussions_by_promoted(query, steem_instance=instance)
    via_generator = list(
        Discussions(steem_instance=instance).get_discussions(
            "promoted", query, limit=10))
    self.assertEqual(len(direct), 10)
    self.assertEqual(len(via_generator), 10)
def get_community_posts(duration=86400, community_tag='hive-161179'):
    """Return community posts newer than *duration* seconds.

    Fetches up to 1000 'created'-ordered posts for *community_tag* and
    keeps the leading run of posts whose age is below *duration*.
    Results arrive newest-first, so iteration stops at the first post
    that is too old.
    """
    query = Query(tag=community_tag)
    posts = Discussions().get_discussions('created', query, limit=1000)
    recent = []
    for post in posts:
        # First post older than the cutoff ends the run.
        if post.time_elapsed().total_seconds() >= duration:
            break
        recent.append(post)
    return recent
def get_comments(self):
    """Yield this account's comments by paginating Discussions_by_comments.

    Pages through the API in batches of up to 100 using the last seen
    permlink as the next page's starting anchor. Stops when ``self.limit``
    comments have been yielded, or when the API returns nothing new.
    Comments with an empty permlink are skipped (and not counted).
    """
    start_permlink = None  # pagination anchor; None on the first page
    limit = self.limit
    comment_count = 0
    while True:
        query_limit = 100
        if limit is not None:
            # +1 because every page after the first begins with the
            # previous page's last comment (stripped below).
            query_limit = min(limit - comment_count + 1, query_limit)
        query = Query(start_author=self.username, start_permlink=start_permlink, limit=query_limit)
        results = Discussions_by_comments(query)
        # Done: empty page, or a page containing only the duplicated
        # pagination anchor from the previous iteration.
        if len(results) == 0 or (start_permlink and len(results) == 1):
            return
        if comment_count > 0 and start_permlink:
            results = results[1:]  # strip duplicates from previous iteration
        for comment in results:
            if comment["permlink"] == '':
                # presumably a placeholder/deleted entry — skip; TODO confirm
                continue
            comment_count += 1
            yield comment
            start_permlink = comment['permlink']
            if comment_count == limit:
                return
def getPosts():
    """Fetch the hottest posts for the chosen topic and write an HTML
    card for each via htmlOut().
    """
    # grab Steem URI
    q = Query(limit=searchChoice, tag=topicChoice)
    for entry in Discussions_by_hot(q):
        # The entry's string form carries the authorperm as its second
        # whitespace-separated token, with one trailing wrapper character.
        token = str(entry).split()[1]
        uri = token[:-1]
        # grab post data from the blockchain
        post = Comment(uri)
        body = post['body']
        published = post['created']
        # cut uri display for uniformity on output: truncate long URIs
        # with an ellipsis, right-pad short ones to 50 characters.
        if len(uri) > 50:
            uriShow = str(token[:47]) + '...'
        else:
            uriShow = uri + ' ' * (50 - len(uri))
        # write HTML card data
        htmlOut(uri, uriShow, body, published, body)
def test_cashout(self):
    """Smoke-test Discussions_by_cashout with a limit-10 query; only
    checks that the call completes without raising.
    """
    Discussions_by_cashout(Query(limit=10), steem_instance=self.bts)
for v in am: if v - correct_a != 0: diff_ans.append(v - correct_a) if abs(min(diff_ans)) == abs(correct_a - user_a): return True else: return False categories = ['comments', 'top_posts', 'total_payouts'] q_list = [ 'How many comments are in tag ', 'How many top posts are in tag ', 'What is the total payout (in HBD) for tag ' ] q = Query(limit=20, start_tag="") taglist = [] chosen_tags = [] chosen = 0 question_index = 0 question_max = 5 q_correct = 0 q_missed = 0 q_percent = 0 meta_list = [] score = 0 answers = [] # Collect top tags for h in Trending_tags(q): #print(h)
from beem.discussions import Query, Discussions_by_trending, Discussions_by_blog from beem.account import Account from beem.comment import Comment from beem.exceptions import ContentDoesNotExistsException query = Query(limit=10, tag="steemjet") query2 = Query(limit=10, tag="dimimp") def post_info(): info = [] for post in Discussions_by_trending(query): if "image" in post.json_metadata: picture = post.json_metadata["image"] acc = Account(post["author"]) rep = acc.rep info += [{ "title": post["title"], "content": post.body[:200], "author": post["author"], "author_rep": rep, "image": picture[0], "permalink": post["permlink"] }] elif "image" in post.json_metadata: if "http" in post.json_metadata["image"]: picture = post.json_metadata["image"] acc = Account(post["author"]) rep = acc.rep info += [{ "title": post["title"],
from django.utils import timezone
from beem import Hive
from beem.nodelist import NodeList
from beem.account import Account
from beem.comment import Comment
from beem.discussions import Query
from beem.discussions import Discussions
from beem.instance import set_shared_blockchain_instance
from beem.utils import construct_authorperm
from markupsafe import Markup

# Module-level blockchain setup: refresh the node list and register a
# shared Hive instance so beem objects created elsewhere reuse it.
nodelist = NodeList()
nodelist.update_nodes()
#nodes = ["https://api.hive.blog", "https://anyx.io"]
q = Query(limit=10)  # default query used by callers of this module
hv = Hive(node=nodelist.get_hive_nodes())
set_shared_blockchain_instance(hv)
d = Discussions(blockchain_instance=hv)
# All post images are rewritten through this resizing proxy.
image_proxy = "https://images.hive.blog/480x0/"


def strip(text):
    """Convert a post's raw body to display-ready HTML in place.

    Mutates ``text['body']``: wraps bare image URLs in markdown image
    syntax, renders markdown to HTML, downgrades h1/h2 headings to h3,
    and rewrites <img> tags to load through ``image_proxy``.
    NOTE(review): relies on ``re`` and ``markdown`` being imported
    elsewhere in this file — confirm.
    """
    # Turn a leading bare image URL into a markdown image.
    text['body'] = re.sub(r"(^https?:[^)''\"]+\.(?:jpg|jpeg|gif|png))", rf'![](\1) >', text['body'])
    # Render markdown with the project's extension set.
    text['body'] = markdown.markdown(text['body'], extensions=[
        'nl2br', 'codehilite', 'pymdownx.extra', 'pymdownx.magiclink',
        'pymdownx.betterem', 'pymdownx.inlinehilite', 'pymdownx.snippets',
        'pymdownx.striphtml'])
    # Flatten top-level headings so post HTML fits the page layout.
    text['body'] = re.sub("(<h1>|<h2>)", "<h3>", text['body'])
    # Rewrite <img src=...> to fetch through the resizing image proxy;
    # the lookahead captures the src attribute value wherever it appears.
    text['body'] = re.sub(r"<img\b(?=\s)(?=(?:[^>=]|='[^']*'|=\"[^\"]*\"|=[^'\"][^\s>]*)*?\ssrc=['\"]([^\"]*)['\"]?)(?:[^>=]|='[^']*'|=\"[^\"]*\"|=[^'\"\s]*)*\"\s?\/?>", rf'<img src={image_proxy}\1 >', text['body'])
def cli(
    ctx,
    tags,
    all_tags,
    authors,
    wo_authors,
    with_resteems,
    start,
    end,
    min_age,
    max_age,
    voters,
    wo_voters,
    limit,
    verbose,
):
    """Collect recent discussions matching the tag/author/voter filters
    and store up to *limit* of them in the click context for later steps.

    Validates the age and datetime windows (exiting with status 1 on
    inconsistent input), queries 'created' discussions per tag and 'blog'
    discussions per author, filters them, deduplicates by authorperm,
    and sorts newest-first.
    """
    # logger
    # Map -v count (capped at 4) onto a logging level name.
    VERBOSITY = ["critical", "error", "warn", "info", "debug"][
        int(min(verbose, 4))
    ]
    LOGGER.setLevel(getattr(logging, VERBOSITY.upper()))
    FORMATTER = logging.Formatter(LOG_FORMAT)
    SH = logging.StreamHandler()
    SH.setFormatter(FORMATTER)
    LOGGER.addHandler(SH)
    LOGGER.info("Starting script")
    # Sanity-check the user-supplied windows before doing any work.
    if max_age < min_age:
        click.echo(
            f"Min age ({min_age} hours) can't be higher than max age ({max_age} hours)."
        )
        exit(1)
    if start > end:
        click.echo(
            f"Starting datetime ({start}) must be older than ending datetime ({end})."
        )
        exit(1)
    # pass input vars to context
    ctx.ensure_object(dict)
    ctx.obj = {
        "TAGS": tags,
        "DATETIME_START": start,
        "DATETIME_END": end,
        "MIN_AGE": min_age,
        "MAX_AGE": max_age,
        "VOTERS": voters,
        "LIMIT": limit,
    }
    LOGGER.debug("Input params")
    LOGGER.debug(ctx.obj)
    results = []
    # Over-fetch by 25% above 100 so post-filtering can still fill *limit*.
    q_limit = 100 if limit <= 100 else math.ceil(limit * 1.25)
    # Predicate factories: each defaults to accept-all when its filter is unset.
    has_all_tags = partial(all_tags_in, tags) if all_tags else lambda x: True
    has_selected_author = (
        partial(is_authored_by_any, authors) if authors else lambda x: True
    )
    wo_excluded_authors = partial(is_not_authored_by, wo_authors) if wo_authors else lambda x: True
    if tags:
        for tag in tags:
            q = Query(tag=tag)
            discussions = Discussions(steem_instance=STM).get_discussions(
                discussion_type="created", discussion_query=q, limit=q_limit
            )
            results += [
                d
                for d in discussions
                if has_all_tags(d)
                and has_selected_author(d)
                and wo_excluded_authors(d)
                and voted_by_any(voters, d)
                and not voted_by_any(wo_voters, d)
            ]
    if authors:
        # Unless resteems are allowed, keep only posts actually authored
        # by a selected author (a blog feed includes resteems).
        include_resteems = (
            has_selected_author if not with_resteems else lambda x: True
        )
        for author in authors:
            q = Query(tag=author)
            discussions = Discussions(steem_instance=STM).get_discussions(
                discussion_type="blog", discussion_query=q, limit=q_limit
            )
            results += [
                d
                for d in discussions
                if has_all_tags(d)
                and include_resteems(d)
                and wo_excluded_authors(d)
                and voted_by_any(voters, d)
                and not voted_by_any(wo_voters, d)
            ]
    # Tag and author passes can return the same post twice.
    results = list(remove_duplicates("authorperm", results))
    results.sort(key=lambda x: x["created"], reverse=True)
    results = results[:limit]
    LOGGER.info("Found %d items to process", len(results))
    # pass computed vars to context
    ctx.obj["RESULTS"] = results