def vote(self, post=None, url=None, weight=None, retries=VOTE_RETRIES):
    """Vote on a post, retrying on failure and throttling the vote rate.

    :param post: comment object to vote on (alternative to ``url``)
    :param url: URL of the post to vote on (alternative to ``post``)
    :param weight: vote weight; when falsy it is derived via ``self.weight(c)``
    :param retries: attempts remaining before giving up
    :return: True when the vote succeeded, False otherwise
    """
    c = SteemComment(comment=post, url=url)
    if retries <= 0:
        logger.error("Vote {} failed after retries for {} times".format(
            c.get_url(), VOTE_RETRIES))
        return False
    # Throttle: if the last vote was too recent, sleep until the minimum
    # interval has elapsed, then restart the whole attempt (the wait
    # consumes one retry).
    while time.time() - self.last_vote_timestamp < MINIMUM_VOTE_INTERVAL:
        self.sleep()
        if time.time() - self.last_vote_timestamp >= MINIMUM_VOTE_INTERVAL:
            return self.vote(post, url, weight, retries - 1)
    success = False
    try:
        weight = weight or self.weight(c)
        success = self.voter.vote(c.get_comment(), weight=weight)
        self.last_vote_timestamp = time.time()
    except Exception:
        # was a bare `except:`, which also swallows SystemExit /
        # KeyboardInterrupt; narrowed to Exception
        logger.error(
            "Failed when voting {} with error: {} . {} retry times left.".
            format(c.get_url(), traceback.format_exc(), retries - 1))
        return self.vote(post, url, weight, retries - 1)
    self.after_success(success, c)
    return success
def claim_all_scot_tokens(self):
    """Claim every pending SCOT token for this author in one custom_json call."""
    tokens = self.get_pending_scot_tokens()
    if tokens and len(tokens) > 0:
        body = []
        for token in tokens:
            amount = self.get_scot_token_pending_amount(token)
            if amount and amount > 0:
                body.append({"symbol": token})
                # pending amount appears to be scaled by 1000 — TODO confirm
                amount = float(amount) / 1000
                logger.info("@{} will claim {} {} token".format(
                    self.author, amount, token))
        if not body:
            # every pending amount was zero or missing; don't broadcast an
            # empty claim (the original sent custom_json with body == [])
            logger.info("@{} has no tokens to claim.".format(self.author))
            return
        try:
            self.steem.custom_json("scot_claim_token", json.dumps(body),
                                   required_posting_auths=[self.author])
            logger.info("@{} has claimed all tokens successfully".format(
                self.author))
        except Exception:
            logger.error(
                "Failed when @{} was claiming all token.\nError: {}".
                format(self.author, traceback.format_exc()))
            # clear the transaction buffer to avoid impacting the next transaction
            self.steem.clear()
    else:
        logger.info("@{} has no tokens to claim.".format(self.author))
def watch(self, ops):
    """Inspect an incoming operation and schedule a vote on it if eligible.

    :param ops: a blockchain operation dict (or a Comment object)
    """
    author = ops['author']

    def perform_vote():
        # queue the comment for voting; build the wrapper from whichever
        # form the operation arrived in
        if isinstance(ops, Comment):
            c = SteemComment(comment=ops)
        else:
            c = SteemComment(ops=ops)
        self.append_to_vote_queue(post=c.get_comment())

    self.ctx(ops)
    try:
        if self.what_to_vote(ops) and self.who_to_vote(
                author) and self.is_ready():
            delay = self.when_to_vote(ops)  # mins
            if delay is not None and delay > 0:
                secs = 60.0 * delay
                logger.info("I'll vote after {} seconds".format(secs))
                t = Timer(secs, perform_vote)
                t.start()
            else:
                logger.info("I'll vote immediately")
                perform_vote()
    except Exception:
        # narrowed from a bare `except:`
        logger.error(
            "Failed when watching the comment [{}] with error: {} .".
            format(ops, traceback.format_exc()))
def vote(self, post=None, url=None, weight=None, retries=VOTE_RETRIES):
    """Vote on a post, retrying on failure and throttling with a jittered sleep.

    :param post: comment object to vote on (alternative to ``url``)
    :param url: URL of the post to vote on (alternative to ``post``)
    :param weight: vote weight; when falsy it is derived via ``self.weight(c)``
    :param retries: attempts remaining before giving up
    :return: True when the vote succeeded, False otherwise
    """
    c = SteemComment(comment=post, url=url)
    if retries <= 0:
        logger.error("Vote {} failed after retries for {} times".format(
            c.get_url(), VOTE_RETRIES))
        return False
    # Throttle: sleep slightly longer than the minimum interval (up to +20%
    # random jitter), then restart the attempt (the wait consumes one retry).
    while time.time() - self.last_vote_timestamp < MINIMUM_VOTE_INTERVAL:
        wait_time = round(
            MINIMUM_VOTE_INTERVAL + random.random() * MINIMUM_VOTE_INTERVAL * 0.2, 2)
        logger.info(
            "Sleep {} seconds to avoid voting too frequently.".format(
                wait_time))
        time.sleep(wait_time)
        if time.time() - self.last_vote_timestamp >= MINIMUM_VOTE_INTERVAL:
            return self.vote(post, url, weight, retries - 1)
    success = False
    try:
        weight = weight or self.weight(c)
        success = self.voter.vote(c.get_comment(), weight=weight)
        self.last_vote_timestamp = time.time()
    except Exception:
        # narrowed from a bare `except:`
        logger.error(
            "Failed when voting {} with error: {} . {} retry times left.".
            format(c.get_url(), traceback.format_exc(), retries - 1))
        return self.vote(post, url, weight, retries - 1)
    self.after_success(success)
    return success
def download(self, href, config):
    """Download a YouTube video as mp4 at up to config['height'] resolution.

    :param href: YouTube URL containing a ``v=<token>`` query parameter
    :param config: dict with a 'height' key bounding the resolution
    :return: path of the downloaded (or already-cached) file, or None on failure
    """
    logger.warning(self.errored)
    token = href.split('=')[1]
    file_name = token + '-' + str(config['height'])
    file_path = DOWNLOAD_PATH + "/" + file_name + ".mp4"
    if path.exists(file_path):
        logger.debug('Already exists')
        return file_path
    if href in self.errored:
        # the original passed `href` as a %-format arg with no placeholder,
        # so it was silently dropped; use lazy %-formatting instead
        logger.warning('Was errored before: %s', href)
        return None
    logger.debug('Wasn\'t errored')
    try:
        yt = pytube.YouTube(href)
        video_filter = yt.streams \
            .filter(subtype='mp4') \
            .filter(progressive=False)
        # pick the highest resolution that does not exceed the configured height
        quality = 0
        for video in video_filter.all():
            resolution = video.resolution
            logger.debug(f"get {video.url}")
            if resolution is not None:
                resolution = int(video.resolution.replace('p', ''))
                if resolution <= config['height'] and resolution >= quality:
                    quality = resolution
        video_filter = video_filter.filter(resolution=str(quality) + "p")
        video = video_filter.first()
        logger.info("Quality: " + str(quality) + "p")
        if video is None:
            # remember the failure so we don't retry this URL forever
            self.errored[href] = True
            self.__save_cache__()
            return None
        # was a bare print(); route through the logger like everything else
        logger.debug(f"Downloading {DOWNLOAD_PATH}")
        video.download(
            DOWNLOAD_PATH,
            filename=file_name
        )
        return file_path
    except Exception as error:
        logger.error('Error handled: %s', error)
        self.errored[href] = True
        self.__save_cache__()
        return None
def _get_position(self, body):
    """Extract the position value from the first position tag in the body.

    :param body: markdown text of the post
    :return: the tag's position as an int, or DEFAULT_POSITION when absent or on error
    """
    try:
        elements = SteemMarkdown(body).find_elements(POSITION_TAG_SELECTOR)
        if elements and len(elements) > 0:
            position = elements[0].get("position")
            return int(position)
    except Exception:
        # narrowed from a bare `except:`
        logger.error("Failed when getting position tag.\nError: {}".format(
            traceback.format_exc()))
    return DEFAULT_POSITION
def _has_published(self, title):
    """Return True when one of the author's latest 50 posts has the given title.

    :param title: exact post title to look for
    :return: True on a match, False otherwise (including on lookup failure)
    """
    try:
        posts = get_posts(account=self.author, limit=50)
        # any() replaces the manual loop; an empty result yields False
        return any(post.title == title for post in posts)
    except Exception:
        # narrowed from a bare `except:`
        logger.error("Failed when checking publish states. Error: {}".format(
            traceback.format_exc()))
        return False
def claim_scot_token(self, token=None):
    """Claim the pending rewards of a single SCOT token via custom_json.

    :param token: token symbol to claim
    """
    amount = self.get_scot_token_pending_amount(token)
    if amount and amount > 0:
        body = {"symbol": token}
        # pending amount appears to be scaled by 1000 — TODO confirm
        amount = float(amount) / 1000
        try:
            self.steem.custom_json("scot_claim_token", json.dumps(body),
                                   required_posting_auths=[self.author])
            logger.info("@{} has claimed {} {} token successfully".format(
                self.author, amount, token))
        except Exception:
            # narrowed from a bare `except:`
            logger.error(
                "Failed when @{} was claiming {} {} token.\nError: {}".format(
                    self.author, amount, token, traceback.format_exc()))
    else:
        logger.info("@{} has no {} token to claim.".format(self.author, token))
def get_tags(self):
    """Return the tags from the comment's json_metadata, refreshing once if missing.

    :return: the list of tags, or [] when unavailable
    """
    try:
        c = self.get_comment()
        if c.json_metadata and 'tags' in c.json_metadata:
            return c.json_metadata['tags']
        # metadata may be stale; refresh once and try again
        self.refresh()
        c = self.get_comment()
        if c.json_metadata and 'tags' in c.json_metadata:
            return c.json_metadata['tags']
        return []
    except Exception:
        # the original logged "Failed when generating pages" (a copy-paste
        # from elsewhere) and formatted `c`, which is unbound when
        # get_comment() itself raised; log the traceback instead
        logger.error("Failed when getting tags.\nError: {}".format(
            traceback.format_exc()))
        return []
def upload(self, path):
    """Upload an image file and return its public URL.

    :param path: local path of the image; falsy paths are ignored
    :return: the uploaded image URL, or None on failure
    """
    if path:
        try:
            res = self.uploader.upload(path, self.author)
            if res and 'url' in res:
                url = res['url']
                if url and len(url) > 0:
                    logger.info(
                        "Image [{}] uploaded to [{}] successfully".format(
                            path, url))
                    return url
        except Exception:
            # narrowed from a bare `except:`
            logger.error(
                "Failed when uploading image {}.\nError: {}".format(
                    path, traceback.format_exc()))
    return None
def get_token_transfers(self, token, offset=0, limit=100):
    """Fetch (and cache on the instance) the account's transfer history for a token.

    :param token: token symbol
    :param offset: pagination offset passed to the remote API
    :param limit: page size passed to the remote API
    :return: the transfer list from the API, or None when retrieval failed
    """
    if self.transfers is None:
        url = URL.format(account=self.account.author, symbol=token,
                         offset=offset, limit=limit)
        try:
            r = requests.get(url)
            if r.ok:
                self.transfers = r.json()
            else:
                logger.error("Failed when retrieving transfer info")
        except Exception:
            # narrowed from a bare `except:`
            logger.error(
                "Failed when retrieving transfer info. Error: {}".format(
                    traceback.format_exc()))
    return self.transfers
def load_from_link(link):
    """Download an article from a URL and return the text of its paragraphs.

    :param link: URL of the article
    :return: (text, None) on success, (None, error_message) on failure
    """
    try:
        # the original never closed the response; the context manager does
        with urllib.request.urlopen(link) as scraped_data:
            article = scraped_data.read()
        parsed_article = bs.BeautifulSoup(article, 'lxml')
        paragraphs = parsed_article.find_all('p')
        # single join instead of quadratic `+=` concatenation
        article_text = "".join(p.text for p in paragraphs)
        logger.debug(article_text)
        return article_text, None
    except Exception as error:
        logger.error(error)
        return None, "We cannot download article for your link"
def deploy(ctx, host="hexo"):
    """ deploy the static blog to the GitHub pages """
    logger.info("launch the deploy on [{}]".format(host))
    if host == "hexo":
        build(ctx)
        os.system("hexo deploy")
        return
    if host == "netlify":
        hook_id = settings.get_env_var("NETLIFY_HOOK") or None
        if not hook_id:
            logger.error("Failed: we need the hook ID to deploy")
            return
        # `-d {}` posts a literal empty JSON body to the Netlify build hook
        build_hook = "curl -X POST -d {} https://api.netlify.com/build_hooks/" + hook_id
        os.system(build_hook)
        return
    # "github" and any unknown host are currently no-ops
def transfer(self, to, token, amount, memo="", retries=5):
    """Transfer tokens (STEEM/SBD natively, others via the steem-engine wallet).

    Retries on failure until `retries` is exhausted.

    :param to: recipient account name
    :param token: token symbol (case-insensitive)
    :param amount: amount to send
    :param memo: transfer memo (may be empty, but not None)
    :param retries: attempts remaining
    """
    if retries <= 0:
        # terminal failure deserves error level (was info); typo "retires" fixed
        logger.error("Transfer failed after maximum retries")
        return
    if to and token and amount and memo is not None:
        token = token.upper()
        try:
            if token in ["STEEM", "SBD"]:
                self.account.account.transfer(to, amount, token, memo)
            else:
                self.se_wallet.transfer(to, amount, token, memo=memo)
            logger.info("Transferred {} {} to {} with memo [{}]".format(
                amount, token, to, memo))
        except Exception:
            # narrowed from a bare `except:`; typo "tranferring" fixed
            logger.error(
                "Failed when transferring {} {} to {} with memo [{}].\nError: {}"
                .format(amount, token, to, memo, traceback.format_exc()))
            self.transfer(to, token, amount, memo, retries - 1)
def build_all(ctx, accounts=None, host="github", debug=False, production=False):
    """ download the posts of all the accounts, and generate pages """
    # allow the DEBUG env var to switch debug mode on when not set explicitly
    if not debug:
        debug_setting = settings.get_env_var("DEBUG")
        if debug_setting and debug_setting.lower() == "true":
            debug = True
    # comma-separated account list, from the argument or STEEM_ACCOUNTS env var
    accounts = accounts or settings.get_env_var("STEEM_ACCOUNTS") or []
    if accounts and len(accounts) > 0:
        if production:
            setup(ctx)
        for account in accounts.split(","):
            try:
                logger.info("Start generating pages for account @{} ...".format(account))
                clean(ctx)
                count = download(ctx, account=account, host=host, debug=debug,
                                 production=production)
                if count > 0:
                    build(ctx, debug)
            except Exception:
                # narrowed from a bare `except:`; one failing account must
                # not stop the others
                logger.error(
                    "Failed when generating pages for account @{}.\nError: {}".format(
                        account, traceback.format_exc()))
def run(self, callback=default_callback, lookback=0, start=-1, days=-1):
    """Stream blockchain operations and dispatch each one to `callback`.

    :param callback: function invoked with each matching operation
    :param lookback: number of blocks before the current head to start from
    :param start: explicit start block number (takes precedence)
    :param days: start this many days ago (used when start/lookback unset)
    """
    def dispatch(ops):
        # isolate callback failures so one bad operation doesn't kill the
        # stream (this try/except was duplicated in both loops); typo
        # "procssing" fixed and the bare `except:` narrowed
        try:
            callback(ops)
        except Exception:
            logger.error("Failed when processing operation {} with error: {}".format(
                ops, traceback.format_exc()))

    try:
        if lookback > 0 or start > 0 or days > 0:
            if self.last_streamed_block == 0:
                if start > 0:
                    start_block = start
                elif lookback > 0:
                    start_block = self.blockchain.get_current_block_num() - int(lookback)  # 200000
                else:
                    start_date = days_ago(days)
                    start_block = self.blockchain.get_estimated_block_num(start_date)
            else:
                # resume just after the last block we already processed
                start_block = self.last_streamed_block + 1
            stop_block = self.blockchain.get_current_block_num()
            logger.info("Streaming for operations {} has started from block {} to {}".format(
                self.operations, start_block, stop_block))
            for ops in self.blockchain.stream(opNames=self.operations,
                                              start=start_block, stop=stop_block,
                                              max_batch_size=self.max_batch_size,
                                              threading=self.threading, thread_num=8):
                dispatch(ops)
        else:
            logger.info("Streaming for operations {} has started from the latest blocks".format(
                self.operations))
            for ops in self.blockchain.stream(opNames=self.operations,
                                              max_batch_size=self.max_batch_size,
                                              threading=self.threading, thread_num=8):
                dispatch(ops)
    except Exception:
        logger.error("Failed when streaming operations {} with error: {}".format(
            self.operations, traceback.format_exc()))
def vote(self, post, weight=100):
    """Vote on a post with the given percentage weight.

    :param post: the post/comment to vote on
    :param weight: vote weight in [-100, 100]; > 0 upvotes, otherwise downvotes
    :return: the vote result, or False when the vote cannot be cast
    """
    if post:
        # the original `weight and weight >= -100 and weight <= 100` was
        # falsy for weight == 0 and wrongly reported a range error for an
        # in-range value; compare explicitly instead
        if weight is not None and -100 <= weight <= 100:
            if weight > 0:
                if self.has_vp(up=True):
                    return self._upvote(post, weight)
                logger.error("{} has no enough VP for upvote".format(
                    self.author))
            else:
                if self.has_vp(up=False):
                    return self._downvote(post, weight)
                logger.error("{} has no enough VP for downvote".format(
                    self.author))
        else:
            logger.error(
                "Failed: the vote weight {} exceeds the range [-100, 100]".
                format(weight))
    return False
import json

from flask import Flask

from utils.logging.logger import logger

app = Flask("__app__", static_folder='tmp', static_url_path='')

# maps a work-item id to its status record ({'status': ..., 'url': ...});
# persisted in working-status.json between runs
working_status = {}
try:
    with open('working-status.json', 'r') as inp:
        data = inp.read()
        working_status = json.loads(data)
except (FileNotFoundError, json.JSONDecodeError):
    # was a bare `except:` that reported every failure (including corrupt
    # JSON) as "file not found"; start from an empty status map either way
    logger.error('File working-status.json not found')

if 'max_video_index' not in working_status:
    working_status['max_video_index'] = 0


def setReadyStatus(current_id, res_file):
    """Mark the given work item as ready and record its result file URL."""
    if current_id is None:
        return
    working_status[current_id]['status'] = 'ready'
    working_status[current_id]['url'] = res_file


def setProcessStatus(current_id, message):
    """Mark the given work item as in-process (`message` is currently unused)."""
    if current_id is None:
        return
    working_status[current_id]['status'] = 'process'
def find_images(text, limit=1):
    """Search Yandex Images for `text` and return up to `limit` image source URLs.

    :param text: search query
    :param limit: maximum number of image URLs to return
    :return: list of image `src` URLs (possibly empty)
    """
    logger.debug(f"start find images for {text}")
    try:
        query = urllib.parse.urlencode({"text": text})
        url = "https://yandex.ru/images/search?" + query
        logger.debug(f"get url {url}")
        response = urllib.request.urlopen(url)
        html = response.read()
        soup = BeautifulSoup(html, "html.parser")
        # the original reused the name `images` for the result links, the
        # preview elements, and the loop target; use distinct names
        links = [a["href"] for a in soup.find_all(attrs={"class": "serp-item__link"})]
        logger.debug(f"get {len(links)} images before limiting")
        links = links[:min(limit, len(links))]
        logger.info(f"get {len(links)} images")
        hrefs = []
        for href in links:
            url = "https://yandex.ru" + href
            logger.debug(f"get image from {url}")
            # render the preview page with JS to expose the real image src
            session = HTMLSession()
            response = session.get(url)
            response.html.render()
            logger.debug(response.html)
            previews = response.html.find('.preview2__arrow-image')
            logger.debug(previews)
            src = None
            for image in previews:
                logger.debug(image.attrs)
                try:
                    test_src = image.attrs["src"]
                    # the original logged `src` here, which was still None
                    logger.debug(f"find src {test_src}")
                    src = test_src
                    break
                except Exception as error:
                    logger.error(error)
            if src is not None:
                hrefs.append(src)
        return hrefs
    except Exception as error:
        logger.error(error)
        return []