def actions(self, action, coin, param=''):
    """Run a named maintenance command on the remote masternode host.

    :param action: key into the accepted-actions table below
    :param coin: coin descriptor providing the daemon binary name
    :param param: optional extra value — an int minute for pause_crontab,
                  or a URL string for the ``bs`` bootstrap action
    :return: stdout of the remote command, or a restart-status string on failure
    """
    try:
        pause_tab = ''
        if param != '' and isinstance(param, int):
            # Cron line that restores the saved crontab at minute `param`.
            pause_tab = f"* {param} * * * crontab -r && crontab /tmp/ctab"
        # list of actions that are accepted
        actions = {
            "clean_wallet": f"rm -rf {self.wallet_directory}/{{blocks,peers.dat,chainstate}}",
            "kill_daemon": f"killall -9 {coin.daemon}",
            # Bug fix: quote the echoed cron line so the shell does not
            # glob-expand the `*` fields before they reach /tmp/pt.
            "pause_crontab": f"crontab -l > /tmp/ctab && crontab -r && echo \"{pause_tab}\" > /tmp/pt && crontab /tmp/pt ",
            "view_crontab": "crontab -l",
            "unlock_wallet": f"rm -f {self.wallet_directory}/.lock",
            "is_daemon_up": f"pidof {coin.daemon}",
            "ps": "ps -ef",
            "bs": f"cd {self.wallet_directory} && wget -q {param}",
            "clean_dupes_ctab": f"crontab -l | sort | uniq > /tmp/ctab && crontab /tmp/ctab"
        }
        result = self.connection.run(actions[action], hide=False)
        return result.stdout
    except Exception as e:
        # Unknown action keys (KeyError) and remote failures both land here.
        logging.error(f"Problem in actions method for {action}", exc_info=e)
        return "{'status':'restart'}"
async def handle_start(self):
    """Handle the /start command: validate the deep-linked invite code and
    register the sender.

    :return: human-readable status message to send back to the chat
    """
    # Check if user is already registered
    if self.is_invited:
        # Bug fix: the original backslash line-continuation inside the
        # string literal embedded a run of source indentation in the
        # user-visible message.
        return ('You are already registered with me. '
                'Use /stop to deregister')

    # Check deeplinked start param for invite key
    invite_key = self.text[len('/start '):]
    if not invite_key:
        return 'Sorry, you must have a valid invite code to interact'

    # TODO: Call main app to validate invite key
    # For now a static invite key
    if invite_key != 'alethea':
        return 'Invalid invite code'

    # Register user
    username = self.user['username']
    user_id = self.user['id']  # renamed: `id` shadowed the builtin
    try:
        await self.db.register_user(
            first_name=self.user.get('first_name', '-'),
            last_name=self.user.get('last_name', '-'),
            username=username,
            id=user_id,
            role='user')
        logging.info(f'{user_id} has registered')
    except Exception as e:
        logging.error(e)
        return 'Failed to register you. Try again.'
    return f'Welcome, {username}'
def __init__(self, masternode, coin):
    """Build the SSH connection wrapper for a masternode host.

    :param masternode: dict with ``connection_string`` plus optional
        ``connection_certificate`` / ``password`` / ``destination_folder`` /
        ``wallet_directory`` overrides
    :param coin: coin descriptor supplying directory defaults
    :raises Exception: re-raised when the SSH connection cannot be built
    """
    self.masternode = masternode
    kwargs = {}
    # Prefer key-based auth when a certificate is configured.
    if "connection_certificate" in masternode:
        kwargs['key_filename'] = masternode["connection_certificate"]
    elif "password" in masternode:
        kwargs['password'] = masternode["password"]

    self.installed_folder = masternode.get("destination_folder",
                                           coin.default_dir)
    self.wallet_directory = masternode.get("wallet_directory",
                                           coin.default_wallet_dir)
    try:
        self.connection = Connection(self.masternode["connection_string"],
                                     connect_timeout=31,
                                     connect_kwargs=kwargs)
    except UnexpectedExit as e:
        # possibly try to start the daemon again
        logging.error('Connecting failed unexpectedly', exc_info=e)
        # Bug fix: __init__ previously returned a status string, which makes
        # Python raise "TypeError: __init__() should return None". Re-raise
        # so callers see the real connection error instead.
        raise
    except Exception as e:
        logging.error(
            f"Could not do_action {self.masternode['connection_string']} : {e}",
            exc_info=e)
        raise
def stored_requests(cohort, metric):
    """ View for processing stored requests """
    # pkl_data is the module-level cache of previously computed responses,
    # keyed by HASH_KEY_DELIMETER-joined request-signature components.
    global pkl_data
    hash_ref = pkl_data
    # Parse the cohort and metric IDs
    try:
        hash_ref = hash_ref[
            'cohort_expr' + HASH_KEY_DELIMETER + cohort][
            'metric' + HASH_KEY_DELIMETER + metric]
    except Exception:
        logging.error(__name__ + '::Request not found for: %s' % request.url)
        return redirect(url_for('cohorts') + '?error=2')

    # Parse the parameter values
    # Walk one level deeper into the nested cache for each query-string
    # parameter present on this request.
    for param in REQUEST_META_QUERY_STR:
        if param in request.args:
            try:
                hash_ref = hash_ref[
                    param + HASH_KEY_DELIMETER + request.args[param]]
            except KeyError:
                logging.error(__name__ +
                              '::Request not found for: %s' % request.url)
                return redirect(url_for('cohorts') + '?error=2')

    # Ensure that that the data is a HTTP response object
    if hasattr(hash_ref, 'status_code'):
        return hash_ref
    else:
        # NOTE(review): this redirects to 'cohort' while the branches above
        # use 'cohorts' — confirm both endpoints exist; may be a typo.
        return redirect(url_for('cohort') + '?error=2')
def update_users(self, user_obj):
    """Persist the mutable fields of a ``User`` back to the ``user`` table.

    :param user_obj: ``User`` whose row (matched by ``user_id``) is updated
    :return: True on success, False when the UPDATE fails
    :raises TypeError: when ``user_obj`` is not a ``User``
    """
    if not isinstance(user_obj, User):
        raise TypeError('parameter must satisfy type(user_obj) == User')
    fields = (user_obj.followers, user_obj.watchers, user_obj.nickname,
              user_obj.avatar_path, user_obj.avatar_url, user_obj.des,
              user_obj.is_pro, user_obj.user_id)
    sql = """update user set followers=?, watchers=?, nickname=?, avatar_path=?, avatar_url=?, des=?, is_pro=? where user_id=?"""
    try:
        self.cur.execute(sql, fields)
    except (sqlite3.OperationalError, sqlite3.ProgrammingError):
        logging.error('cannot update user_id={} user info'.format(user_obj.user_id))
        return False
    return True
def user_request(user, metric):
    """ View for requesting metrics for a single user """
    user = str(escape(user))
    # Strip the scheme+host so we can rewrite and re-dispatch the path.
    url = request.url.split(request.url_root)[1]

    # If it is a user name convert to ID
    if search(MW_UNAME_REGEX, user):
        # Extract project from query string
        # @TODO `project` should match what's in REQUEST_META_QUERY_STR
        project = request.args['project'] if 'project' in request.args \
            else 'enwiki'
        logging.debug(__name__ + '::Getting user id from name.')
        conn = dl.Connector(instance='slave')
        # NOTE(review): `project` and `user` are interpolated directly into
        # SQL. `user` went through escape() above, but this should move to a
        # parameterized query — verify the connector supports placeholders.
        conn._cur_.execute('SELECT user_id FROM {0}.user WHERE ' \
            'user_name = "{1}"'.format(project, user))
        try:
            user_id = str(conn._cur_.fetchone()[0])
            # Replace the user name with the numeric id in the request path.
            url = sub(user,user_id, url)
        except Exception:
            logging.error(error_codes[3])
            return redirect(url_for('all_cohorts') + '?error=3')
    # Rewrite /user/... to /cohorts/... and let that view handle it.
    url = sub('user','cohorts', url)
    return redirect(url)
async def findallhdmp4(stime):
    """Fetch up to 100 Web_EventUpload documents newer than *stime*.

    Debug helper: the matching documents are printed, nothing is returned.

    :param stime: lower bound (exclusive) for the EventTime field
    """
    print("begin")
    mp4list = []
    client = AsyncIOMotorClient(ServerParameters.mongodbpath)
    db = client.jt808
    cursor = db["Web_EventUpload"].find({"EventTime": {'$gt': stime}})
    count = 0
    try:
        documents = await cursor.to_list(100)
        print(documents)
    except BaseException as e:
        logging.error(e)
    print("over")
    return
def async_cli(self, action, coin):
    '''
    eventually offer async_cli functions
    :param action: cli subcommand/arguments to run
    :param coin: coin descriptor providing the cli binary name
    :return: stdout of the cli command, or a restart-status JSON string
    '''
    try:
        cmd = f"{self.installed_folder}/{coin.cli} -datadir={self.wallet_directory} {action}"
        logging.info(
            f"Attempting to execute command from masternode object: {cmd}")
        '''
        need to have a threadpool and throw this in there and await the result
        '''
        result = self.connection.run(cmd, hide=False)
        logging.info(
            f"Executed {result.command} on {result.connection.host}, got stdout:\n{result.stdout}"
        )
        return result.stdout
    except UnexpectedExit as e:
        # possibly try to start the daemon again
        logging.warning(f"{coin.cli} exited unexpectedly", exc_info=e)
        return '{"status":"restart"}'
    except Exception as e:
        logging.error(f"Could not do action on daemon at {self.getIP()}")
        # Bug fix: previously fell through returning None here; keep the
        # same restart contract as kill_daemon()/daemon_action().
        return '{"status":"restart"}'
async def handle_stop(self):
    """Handle the /stop command: deregister the sender.

    :return: status message for the chat
    """
    user_id = self.user['id']  # renamed: `id` shadowed the builtin
    try:
        await self.db.deregister_user(user_id)
        logging.info(f'{user_id} has deregistered')
    except Exception as e:
        logging.error(e)
        # Bug fix: previously returned 'Goodbye' even when deregistration
        # failed; mirror handle_start's failure reporting instead.
        return 'Failed to deregister you. Try again.'
    return 'Goodbye'
def job_queue():
    """ View for listing current jobs working """

    error = get_errors(request.args)

    def error_class(em):
        # Map a job status to the bootstrap row class used by the template.
        return {
            'failure': 'error',
            'pending': 'warning',
            'success': 'success'
        }.get(em, '')

    p_list = list()
    p_list.append(Markup('<thead><tr><th>is_alive</th><th>PID</th><th>url'
                         '</th><th>status</th></tr></thead>\n<tbody>\n'))
    for p in processQ:
        try:
            # Pull data off of the queue and add it to the queue data
            while not p.queue.empty():
                # NOTE(review): dict.has_key (and e.message below) are
                # Python-2-only — this module targets Python 2.
                if not queue_data.has_key(p.id):
                    queue_data[p.id] = json.loads(p.queue.get().data)
                else:
                    # NOTE(review): iterating queue_data[p.id] directly
                    # yields keys, not (k, v) pairs — this branch likely
                    # needs .iteritems(); confirm before relying on it.
                    for k,v in queue_data[p.id]:
                        if hasattr(v,'__iter__'):
                            queue_data[p.id][k].extend(v)

            # once a process has finished working remove it and put its
            # contents into the cache
            if not p.process.is_alive() and p.status[0] == 'pending':
                q_response = make_response(jsonify(queue_data[p.id]))
                del queue_data[p.id]
                set_data(p.request, q_response, pkl_data)
                p.status[0] = 'success'
                logging.info(__name__ + '::Completed request %s.' % p.url)
        except Exception as e:
            # Any failure while draining/caching marks the whole job failed.
            p.status[0] = 'failure'
            logging.error(__name__ + "::Could not update request: %s. "
                                     "Exception: %s" % (p.url, e.message) )

        # Log the status of the job
        response_url = "".join(['<a href="', request.url_root, p.url + '">',
                                p.url, '</a>'])
        p_list.append(Markup('<tr class="'+ error_class(p.status[0])+'"><td>'))
        p_list.append("</td><td>".join([str(p.process.is_alive()),
                                        str(p.process.pid),
                                        escape(Markup(response_url)),
                                        p.status[0]]))
        p_list.append(Markup('</td></tr>'))
    p_list.append(Markup('\n</tbody>'))

    if error:
        return render_template('queue.html', procs=p_list, error=error)
    else:
        return render_template('queue.html', procs=p_list)
def setup(self):
    """Download the video page and parse view/like/dislike/comment counts.

    Sets ``self.status``: 1 = ok, 2 = HTTP error, 3 = timeout / blocked /
    proxy error, 4 = unexpected parse failure.
    """
    headers = {
        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'accept-encoding': 'gzip, deflate, br',
        'accept-language': 'en-US,en;q=0.9,pt;q=0.8,zh-CN;q=0.7,zh;q=0.6',
        'cache-control': 'max-age=0',
        'upgrade-insecure-requests': '1',
        'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36',
    }
    video_url = 'https://www.ixigua.com/a{}'.format(self.video_id)
    try:
        request_kwargs = dict(url=video_url, headers=headers,
                              timeout=XConfig.TIMEOUT)
        if self.proxies is not None:
            request_kwargs['proxies'] = self.proxies
        req = requests.get(**request_kwargs)
        self.data = req.text
        self.parse_views()
        self.parse_likes()
        self.parse_dislikes()
        self.parse_comments()
        self.status = 1  # no error
    except requests.HTTPError as http_e:
        logging.error(
            'network error when request video page. Reason:{}'.format(
                http_e))
        self.status = 2
    except requests.Timeout:
        logging.error('time out when request video page. ')
        self.status = 3
    except AttributeError as attr_e:
        # record_data(self.data, type='html')
        # A very short body means the site served a block page, not HTML
        # we can parse.
        if len(self.data) < 200:
            logging.error(
                'block by the website. request content : {}'.format(
                    self.data))
            self.status = 3
        else:
            logging.error(
                'attribution error occur. Reason:{}'.format(attr_e))
            self.status = 4
    except requests.exceptions.ProxyError:
        logging.error('proxy error when request video page')
        self.status = 3
def find_all(self, element_loc):
    """Locate every element matching the locator tuple.

    :param element_loc: locator tuple, e.g. ('id', 'kw')
    :return: list of matching elements
    :raises NoSuchElementException: re-raised after saving a debug snapshot
    """
    logging.info("定位一组元素: {}".format(element_loc))
    try:
        elements = self.driver.find_elements(*element_loc)
    except NoSuchElementException:
        logging.error("定位一组元素失败: {}".format(element_loc))
        self.driver.save_snapshot("1.png")
        raise
    return elements
def find(self, element_loc):  # ('id', 'kw')
    """Locate a single element via the locator tuple.

    :param element_loc: locator tuple, e.g. ('id', 'kw')
    :return: the matching element
    :raises NoSuchElementException: re-raised after saving a named snapshot
    """
    logging.info("定位元素: {}".format(element_loc))
    try:
        element = self.driver.find_element(*element_loc)
    except NoSuchElementException:
        logging.error("定位元素失败: {}".format(element_loc))
        self.snapshot("定位失败", "_".join(element_loc))  # element_loc = ('id', 'kw')
        raise
    return element
def _process_help(args):
    """Worker entry point: count per-user, per-namespace revisions.

    args[0] is the list of user ids; args[1] packs the thread state
    (date_start, date_end, project, log flag).  Returns a list of
    (user_id_str, OrderedDict(namespace_str -> edit count)) pairs.
    """
    state = args[1]
    thread_args = NamespaceEditsArgsClass(state[0],state[1],state[2],state[3])
    user_data = args[0]
    conn = Connector(instance='slave')
    to_string = DataLoader().cast_elems_to_string
    to_csv_str = DataLoader().format_comma_separated_list

    # Format user condition
    user_cond = "rev_user in (" + to_csv_str(to_string(user_data)) + ")"
    # Format timestamp condition
    ts_cond = "rev_timestamp >= %s and rev_timestamp < %s" % (thread_args.date_start, thread_args.date_end)

    if thread_args.log:
        logging.info(__name__ + '::Computing namespace edits. (PID = %s)' % getpid())
        logging.info(__name__ + '::From %s to %s. (PID = %s)' % (
            str(thread_args.date_start), str(thread_args.date_end), getpid()))

    # Join revision -> page so revision counts can be grouped by namespace.
    sql = """
        SELECT r.rev_user, p.page_namespace, count(*) AS revs
        FROM %(project)s.revision AS r JOIN %(project)s.page AS p
            ON r.rev_page = p.page_id
        WHERE %(user_cond)s AND %(ts_cond)s
        GROUP BY 1,2
    """ % {
        "user_cond" : user_cond,
        "ts_cond" : ts_cond,
        "project" : thread_args.project,
    }
    conn._cur_.execute(" ".join(sql.split('\n')))

    # Tally counts of namespace edits
    # Pre-seed every (user, namespace) cell with 0 so absent combinations
    # still appear in the output.
    results = dict()
    for user in user_data:
        results[str(user)] = OrderedDict()
        for ns in NamespaceEdits.VALID_NAMESPACES:
            results[str(user)][str(ns)] = 0
    for row in conn._cur_:
        try:
            if row[1] in NamespaceEdits.VALID_NAMESPACES:
                results[str(row[0])][str(row[1])] = int(row[2])
        except KeyError:
            logging.error(__name__ + "::Could not process row: %s" % str(row))
            pass
        except IndexError:
            logging.error(__name__ + "::Could not process row: %s" % str(row))
            pass

    del conn
    return [(user, results[user]) for user in results]
def iframe_enter(self, element):
    """Switch the driver context into the given frame.

    :param element: frame to switch into, name or id
    :raises BaseException: re-raised after logging when the switch fails
    """
    logging.info(f'切入框架{element}')
    try:
        self.driver.switch_to.frame(element)
    except BaseException:
        logging.error(f'切入框架失败,元素是{element}')
        raise
def testlog(num):
    # Demo/exercise helper: emits log records at several levels for the
    # first `num` integers, sleeping between iterations.
    for x in range(num):
        if x%3==0:
            logging.error('错误')
        # NOTE(review): the `else` below pairs with this x%4 check, so
        # multiples of 3 that are not multiples of 4 are ALSO logged at
        # debug level — confirm whether an elif chain was intended.
        if x%4==0:
            logging.fatal('致命错误')
        else:
            logging.debug(str(x))
        sleep(3)
def insert(self, user_obj):
    """Insert a sampled user into the new database.

    :param user_obj: user object; ``dump()`` must yield the 7 column values
    :return: True on success, False when the INSERT fails
    """
    try:
        sql = """insert into user values(?,?,?,?,?,?,?);"""
        self.new_cur.execute(sql, user_obj.dump())
        return True
    except (sqlite3.ProgrammingError, sqlite3.OperationalError):
        logging.error('cannot insert new user into user ')
        # Bug fix: previously fell through returning None; the sibling DB
        # helpers (update_users/update_followers) return False on failure.
        return False
def close(self):
    """ When the instance is deleted store the pickled data """
    # Persist the module-level response cache so it survives restarts.
    global pkl_data
    pkl_file = None
    try:
        pkl_file = open(settings.__data_file_dir__ + 'api_data.pkl', 'wb')
        cPickle.dump(pkl_data, pkl_file)
    except Exception:
        logging.error(__name__ + '::Could not pickle data.')
    finally:
        # open() itself may have failed, leaving pkl_file as None — only
        # close when we actually hold a file object.
        if hasattr(pkl_file, 'close'):
            pkl_file.close()
def get_all_users(self):
    """Fetch every user_id from the ``user`` table.

    :return: the cursor's result iterable, or None on failure
    """
    sql = """select user_id from user"""
    try:
        return self.cur.execute(sql)
    except (sqlite3.OperationalError, sqlite3.ProgrammingError):
        logging.error('cannot get all user_id from total_bd')
        return None
def my_element(self, element):
    """Locate a single element.

    :param element: locator tuple like ('xpath', '元素')
    :return: the matching element
    :raises BaseException: re-raised after logging a random screenshot id
    """
    logging.info("定位器,元素是:{}".format(element))
    try:
        return self.driver.find_element(*element)
    except BaseException:
        pic_id = random.randint(10000, 99999)
        logging.error('定位失败,定位的元素是:{},图片id是:{}'.format(element, pic_id))
        raise
def kill_daemon(self, coin):
    """Stop the coin daemon via its cli ``stop`` command.

    :param coin: coin descriptor providing the cli binary name
    :return: remote stdout, or a restart-status JSON string on failure
    """
    try:
        stop_cmd = f"{self.installed_folder}/{coin.cli} -datadir={self.wallet_directory} stop"
        return self.connection.run(stop_cmd, hide=False).stdout
    except UnexpectedExit as e:
        # possibly try to start the daemon again
        logging.warning(f"{coin.cli} exited unexpectedly", exc_info=e)
        return '{"status":"restart"}'
    except Exception as e:
        logging.error(f"Could not do action on daemon at {self.getIP()}")
        return '{"status":"restart"}'
async def findlargemp4fileffmpeg(starttime, endtime):
    """Re-encode every GridFS .mp4 larger than ~1 MB uploaded in the window.

    Each matching file is downloaded to disk, converted with
    converttstoh264(), and the converted output is stored back into GridFS
    under the same _id.  Logs a summary with total/converted counts.

    :param starttime: exclusive lower bound on uploadDate
    :param endtime: inclusive upper bound on uploadDate
    """
    #print("begin findlargemp4fileffmpeg")
    mp4list = []
    client = AsyncIOMotorClient(ServerParameters.mongodbpath)
    db = client.jt808
    bucket = AsyncIOMotorGridFSBucket(db, "eventuploadvideos")
    cursor = bucket.find({
        "uploadDate": {
            '$gt': starttime,
            '$lte': endtime
        },
        "filename": {
            "$regex": ".mp4$"
        }
    })
    filelist = await cursor.to_list(100000)
    ccount = 0
    for fi in filelist:
        # Only bother converting files larger than ~1 MB.
        if fi["length"] > 1000000:
            print(fi)
            if os.path.exists(fi["filename"]):
                os.remove(fi["filename"])
            # Download the original to "input<filename>" on local disk.
            ds = await bucket.open_download_stream(fi["_id"])
            f = open("input" + fi["filename"], 'wb')
            bbb = await ds.read()
            f.write(bbb)
            f.close()
            ds.close()
            converttstoh264("input" + fi["filename"], fi["filename"])
            if os.path.exists("input" + fi["filename"]):
                os.remove("input" + fi["filename"])
            # Save the converted file back to the bucket under the same _id.
            try:
                if os.path.exists(fi["filename"]):
                    uf = open(fi["filename"], "rb")
                    ubbb = uf.read()
                    uf.close()
                    os.remove(fi["filename"])
                    # NOTE(review): Motor's bucket.delete() returns an
                    # awaitable — this call is not awaited, so the delete
                    # may not complete before the re-upload below; verify.
                    bucket.delete(fi["_id"])
                    uds = bucket.open_upload_stream_with_id(
                        fi["_id"], fi["filename"])
                    await uds.write(ubbb)
                    # NOTE(review): uds.close() is also a coroutine in
                    # Motor and is not awaited here — verify.
                    uds.close()
                    ccount = ccount + 1
                    logging.info("convert %s %s", fi["_id"], fi["filename"])
            except BaseException as e:
                logging.error(e)
    logging.info("end findlargemp4fileffmpeg total %s convert %s",
                 len(filelist), ccount)
    return
def update_followers(self, user_id, followers):
    """Update one user's follower count in total_database.

    :param user_id: id of the row to update
    :param followers: new follower count
    :return: True on success, False when the UPDATE fails
    """
    sql = """update user set followers=? where user_id=?"""
    try:
        self.all_cur.execute(sql, (followers, user_id))
    except (sqlite3.OperationalError, sqlite3.ProgrammingError):
        logging.error('cannot update {} followers to {}'.format(user_id, followers))
        return False
    return True
def local_create_tx(request, size=1000):
    """
    calls create code to create a TX, by default right now should create an
    1000 sized output and return the tx signed and ready to broadcast
    :param request:
    :param size: desired output size; anything float()-convertible
    :return: JSON string with the signed tx data, or a failure message
    """
    try:
        size = float(size)
    except ValueError:
        return json.dumps({
            "success": "failed",
            "message": f"'{size}' is not a number, tr"
        })
    creator = Create('config.json', size)
    try:
        inputs, keychain, keys, total = creator.get_collat(
            creator.rpc.listunspent())
    except Exception as e:
        logging.error(f"Failed because: {str(e)}")
        return json.dumps({"success": "failed", "message": str(e)})

    change_debug_address, mn_debug_address = creator.get_empty_addresses(2)
    tx = creator.prepare_raw_tx(mn_debug_address, change_debug_address,
                                inputs, total)
    # not used (maybe for debug)
    decoded = creator.rpc.decoderawtransaction(tx)
    '''
    this part could be done on a separate "cold storage" machine with the
    private keys / keychain.  It uses keys from keychain to sign the tx:
    https://bitcoin.org/en/developer-examples#complex-raw-transaction
    We sign multiple times if there are several UTXOs in the input; once
    completely signed the result should be "complete=true".
    signrawtransaction does this automatically when given an array of
    private keys.  The second param should be the previous dependent tx —
    for some reason it works with null.
    '''
    signed = creator.rpc.signrawtransaction(tx, [], keychain)
    return json.dumps({
        "inputs": inputs,
        "mn_addr": mn_debug_address,
        "signed": signed
    })
def run_pool(category):
    """Collect candidate user ids for one category from the ranking API.

    :param category: tag/category name substituted into the query params
    :return: de-duplicated list of user ids (possibly empty on any error)
    """
    rp.params['tag'] = category
    user_ids = []
    try:
        r = requests.get(rp.base_url, params=rp.params, headers=rp.headers,
                         timeout=XConfig.TIMEOUT, cookies=rp.cookies)
        # Bug fix: json.loads(..., encoding=...) was ignored on Python 3 and
        # the keyword was removed entirely in 3.9 (TypeError); parse the
        # response text directly.
        data = json.loads(r.text)
        for item in data['data']:
            user_id = extract_user_id(item['source_open_url'])
            if user_id is None:
                continue
            user_ids.append(user_id)
    except requests.Timeout:
        logging.error('timeout occur when requesting user ids')
    except json.JSONDecodeError:
        logging.error(
            'error occur when decoding requests.text to json. r={}/category={}'
            .format(r, category))
    except requests.HTTPError:
        logging.error('http error occur when requesting user ids')
    except requests.ConnectionError:
        logging.error('connection error occur when requesting user ids')
    return list(set(user_ids))
def __init__(self, **kwargs):
    """Initialize the metric and select the threshold-type object.

    Falls back to EditCountThreshold when the requested
    ``threshold_type_class`` is missing or unknown.
    """
    um.UserMetric.__init__(self, **kwargs)

    # Add the parameter definitions from the threshold type
    self.apply_default_kwargs(kwargs,'init')

    try:
        self._threshold_obj_ = self.__threshold_types[
            kwargs['threshold_type_class']](**kwargs)
    # Bug fix: a missing or unknown threshold_type_class raises KeyError,
    # not NameError, so the documented fallback below was unreachable for
    # the common failure mode.
    except (KeyError, NameError):
        logging.error(__name__ + '::Invalid threshold class. '
                                 'Using default (EditCountThreshold).')
        self._threshold_obj_ = self.EditCountThreshold(**kwargs)
def daemonconf(self, coin):
    """Upload and run the daemon-config script on the remote host.

    :param coin: coin descriptor with script paths and the addnode value
    :return: remote stdout (contains the masternodeprivkey), or a
             failed-status JSON string
    """
    try:
        daemonconf = coin.scripts['local_path'] + coin.scripts['confdaemon']
        pw = self.generatePassword()
        ip = self.getIP()
        # TODO: hard-coded name even though a coin descriptor is passed in;
        # probably should come from the coin object.
        coin_name = "polis"
        self.connection.put(daemonconf)
        cmd = f"/bin/bash {coin.scripts['confdaemon']} {coin_name} {coin.addnode} {self.installed_folder} {ip} {pw}"
        result = self.connection.run(cmd, hide=False)
        # should contain masternodeprivkey
        return result.stdout
    except Exception as e:
        # Bug fix: the caught exception was never recorded; attach it to
        # the log entry so failures are diagnosable.
        logging.error('Exception in daemonconf ', exc_info=e)
        return '{"status":"failed"}'
def daemon_action(self, coin, reindex=0):
    """Start the coin daemon remotely, optionally reindexing.

    :param coin: coin descriptor providing the daemon binary name
    :param reindex: pass 1 to append ``-reindex`` to the daemon command
    :return: remote stdout, or a restart-status JSON string on failure
    """
    try:
        cmd = f"{self.installed_folder}/{coin.daemon} -datadir={self.wallet_directory}"
        if reindex == 1:
            cmd += " -reindex"
        result = self.connection.run(cmd, hide=False)
        logging.info(
            f"Executed {result.command} on {result.connection.host}, got stdout:\n{result.stdout}"
        )
        return result.stdout
    except UnexpectedExit as e:
        logging.warning(f"{coin.daemon} exited unexpectedly", exc_info=e)
        return '{"status":"restart"}'
    except Exception as e:
        logging.error(f"Could not do action on daemon at {self.getIP()}")
        # Bug fix: previously fell through returning None here; keep the
        # same restart contract as the other daemon helpers.
        return '{"status":"restart"}'
def _url_join(base_url, params): """ 连接url和params :param base_url: :param params: :return: """ if not isinstance(params, dict): logging.error('url params must be dictionary') if base_url[-1] != '?': base_url += '?' for keys in params: item = "{}={}&".format(keys, params[keys]) base_url += item return base_url[:-1]
def get_users_url(self, user_ids):
    """
    Build the user-page API url for every id in *user_ids*.

    :param user_ids: list or tuple of user ids
    :return: list of user-page urls
    """
    if not isinstance(user_ids, (list, tuple)):
        logging.error(
            'user_ids must be list or tuple in func=get_user_url')
    user_urls = []
    params = {'to_user_id': '', 'format': 'json'}
    for uid in user_ids:
        params['to_user_id'] = uid
        user_urls.append(Extract._url_join(self._base_user_url, params))
    return user_urls
def install_sentinel(self, coin):
    """Upload and run the sentinel setup script on the masternode host.

    :param coin: coin descriptor with script paths and sentinel git url
    :return: the remote run() result, or a failed-status JSON string
    """
    try:
        script = coin.scripts["local_path"] + coin.scripts["sentinel_setup"]
        self.connection.put(script)
        cmd = (f"/bin/bash {coin.scripts['sentinel_setup']} {coin.sentinel_git} "
               f"{self.installed_folder} {coin.coin_name} {self.wallet_directory}")
        result = self.connection.run(cmd, hide=False)
        logging.info(f"Uploaded sentinel_setup.sh:\n {result}")
        return result
    except UnexpectedExit as e:
        logging.warning(f"{coin.cli} exited unexpectedly ", exc_info=e)
        return '{"status":"failed"}'
    except Exception as e:
        logging.error(
            f"Could not do_action {self.masternode['connection_string']} : {e}",
            exc_info=e)
        return '{"status":"failed"}'
def get_num_block_statistics():
    """Scan functions_list.csv across program/arch-opt directories and build
    block-count statistics.

    :return: (block_num_dict mapping block count -> number of functions,
              number of distinct function names kept after filtering)
    """
    # Enumerate every filtered feature directory, keyed by a running index.
    index_uuid = dict()
    index_count = 0
    for program in config.PROGRAMS:
        dirs = os.listdir(os.path.join(config.FEA_DIR, program,
                                       config.CFG_DFG_GEMINIFEA_VULSEEKERFEA))
        logging.debug('original dirs:{}\n{}'.format(dirs, len(dirs)))
        dirs = [d for d in dirs if filter_by_arch_opt_levels(d)]
        logging.debug('PROGRAMS: {}, ARCHS: {}, OPT_LEVELS: {}'.format(
            config.PROGRAMS, config.ARCHS, config.OPT_LEVELS))
        logging.debug('filtered dirs:{}\n{}'.format(dirs, len(dirs)))
        for d in dirs:
            index_uuid.setdefault(str(index_count), os.path.join(program, d))
            index_count += 1
    logging.debug('index_uuid: {}'.format(index_uuid))
    logging.debug('index_count: {}'.format(index_count))

    func_list_arr = []
    func_list_dict = defaultdict(list)
    block_num_dict = defaultdict(int)
    for k, v in index_uuid.items():
        program, v = v.split(os.sep)
        cur_functions_list_file = os.path.join(
            config.FEA_DIR, program, config.CFG_DFG_GEMINIFEA_VULSEEKERFEA,
            v, 'functions_list.csv')
        if not os.path.exists(cur_functions_list_file):
            logging.error('No functions_list.csv in {}'.format(v))
            # Bug fix: previously fell through and the open() below raised
            # FileNotFoundError; skip missing files instead.
            continue
        with open(cur_functions_list_file, 'r') as fp:
            logging.debug('Gen dataset: {}'.format(cur_functions_list_file))
            for line in csv.reader(fp):
                if line[0] == '':
                    continue
                block_num_dict[int(line[1])] += 1
                # block_num_min/max are module-level filter bounds
                # (presumably CLI-configured elsewhere); max <= 0 disables
                # the filter.
                if block_num_max > 0:
                    if not (int(line[1]) >= block_num_min and
                            int(line[1]) <= block_num_max):
                        continue
                if line[0] not in func_list_dict:
                    func_list_arr.append(line[0])
                value = os.path.join(
                    line[4], config.CFG_DFG_GEMINIFEA_VULSEEKERFEA,
                    line[5], line[0])
                func_list_dict[line[0]].append(value)
    return block_num_dict, len(func_list_arr)
def save_file(file, filename, account_path):
    """Save an uploaded file under ``account_path/<date-dir>/<filename>``.

    :param file: file-like object exposing ``.save(path)``
    :param filename: target file name (must pass allowe_file)
    :param account_path: per-account subdirectory under the app root
    :return: the saved file path as a string, an error message when the
             directory cannot be created, or implicitly None when the
             filename is rejected
    """
    if allowe_file(filename):
        bigpath = os.path.join(current_app.root_path, account_path, now_filename())
        if not os.path.exists(bigpath):
            try:
                os.makedirs(bigpath)
            # Bug fix: was a bare except (swallowing KeyboardInterrupt etc.)
            # logged at INFO level; only OSError is expected here and it is
            # a warning.
            except OSError:
                logging.warning("warning 创建目录失败")
                return "创建目录失败, 看日志去%s" % bigpath
        # new_name = change_filename(filename)
        filepath = os.path.join(bigpath, filename)
        try:
            file.save(filepath)
        except Exception as e:
            logging.error("写入文件出错:" + str(e))
        return str(filepath)
def wait_find(self, element, max_time=10, interva_time=1):
    """Explicitly wait for an element to be present.

    :param element: locator tuple, e.g. ('xpath', 'xxxxx')
    :param max_time: maximum seconds to keep polling (default 10)
    :param interva_time: polling interval in seconds (default 1)
    :raises BaseException: re-raised after saving a failure screenshot
    """
    logging.info("通过显示等待定位{}".format(element))
    try:
        waiter = WebDriverWait(self.driver, max_time, interva_time)
        return waiter.until(ES.presence_of_element_located(element))
    except BaseException:
        pic_id = random.randint(10000, 99999)
        logging.error("定位元素失败,定位的元素是:{},图片id是:{}".format(element, pic_id))
        self.screenshot(pic_id, '定位失败')
        raise
def get_cohort_id(utm_name):
    """ Pull cohort ids from cohort handles """
    conn = dl.Connector(instance='slave')
    conn._cur_.execute('SELECT utm_id FROM usertags_meta '
                       'WHERE utm_name = "%s"' % str(escape(utm_name)))

    utm_id = None
    try:
        utm_id = conn._cur_.fetchone()[0]
    # Bug fix: fetchone() returns None when there is no matching row, which
    # makes the subscript raise TypeError — not the ValueError that was
    # being caught, so the fallback below was unreachable.
    except (ValueError, TypeError):
        pass

    # Ensure the field was retrieved
    if not utm_id:
        logging.error(__name__ + '::Missing utm_id for cohort %s.' %
                      str(utm_name))
        utm_id = -1

    del conn
    return utm_id
def install_watcher(self, coin):
    """Upload and run the watcher cron installer on the remote host.

    :param coin: coin descriptor with script paths, name and daemon binary
    :return: a status string; success only when the script produced no output
    """
    try:
        self.connection.put(coin.scripts["local_path"] + coin.scripts["watcher_cron"])
        logging.info('Uploaded watcher_cron.sh')
        cmd = f"/bin/bash {coin.scripts['watcher_cron']} {coin.name} {self.installed_folder} {coin.daemon} {self.wallet_directory}"
        result = self.connection.run(cmd, hide=False)
        # Any output at all (stdout or stderr) means the install script hit
        # a problem.
        if result.stdout != '' or result.stderr != '':
            return "{'status':'There was a problem installing watcher'}"
        return "{'status':'success'}"
    except UnexpectedExit as e:
        logging.warning(f"{coin.cli} exited unexpectedly", exc_info=e)
        return '{"status":"failed"}'
    except Exception as e:
        logging.error(
            f"Could not do_action {self.masternode['connection_string']} : {e}",
            exc_info=e)
        return '{"status":"failed"}'
def get_cohort_refresh_datetime(utm_id):
    """
        Get the latest refresh datetime of a cohort.  Returns current time
        formatted as a string if the field is not found.
    """
    conn = dl.Connector(instance='slave')
    conn._cur_.execute('SELECT utm_touched FROM usertags_meta '
                       'WHERE utm_id = %s' % str(escape(utm_id)))

    utm_touched = None
    try:
        utm_touched = conn._cur_.fetchone()[0]
    # Bug fix: an empty result makes fetchone() return None and the
    # subscript raise TypeError — not the ValueError that was being caught,
    # so the documented now() fallback was unreachable.
    except (ValueError, TypeError):
        pass

    # Ensure the field was retrieved
    if not utm_touched:
        logging.error(__name__ + '::Missing utm_touched for cohort %s.' %
                      str(utm_id))
        utm_touched = datetime.now()

    del conn
    return utm_touched.strftime(DATETIME_STR_FORMAT)
def set_data(request_meta, data, hash_table_ref):
    """
        Given request meta-data and a dataset create a key path in the
        global hash to store the data
    """
    key_sig = list()

    # Build the key signature
    for key_name in REQUEST_META_BASE:
        # These keys must exist
        key = getattr(request_meta, key_name)
        if key:
            key_sig.append(key_name + HASH_KEY_DELIMETER + key)
        else:
            logging.error(__name__ + '::Request must include %s. '
                                     'Cannot set data %s.' % (
                                         key_name, str(request_meta)))
            return

    for key_name in REQUEST_META_QUERY_STR:
        # These keys may optionally exist
        if hasattr(request_meta,key_name):
            key = getattr(request_meta, key_name)
            if key:
                key_sig.append(key_name + HASH_KEY_DELIMETER + key)

    logging.debug(__name__ + "::Adding data to hash @ key signature = {0}".
        format(str(key_sig)))
    # For each key in the key signature add a nested key to the hash
    last_item = key_sig[len(key_sig) - 1]
    for key in key_sig:
        if key != last_item:
            # NOTE(review): has_key is Python-2-only; under Python 3 this
            # condition would always be False and any existing subtree
            # would be clobbered with a fresh OrderedDict — confirm the
            # target runtime is Python 2.
            if not (hasattr(hash_table_ref, 'has_key') and
                    hash_table_ref.has_key(key) and
                    hasattr(hash_table_ref[key], 'has_key')):
                hash_table_ref[key] = OrderedDict()
            hash_table_ref = hash_table_ref[key]
        else:
            # Leaf of the key path: store the dataset itself.
            hash_table_ref[key] = data
def parse_user_page(self, user_id):
    """Fetch and parse a user's profile page into a User object.

    :param user_id: numeric user id
    :return: a populated User, or None on any request/parse failure
    """
    url = self.get_user_url(user_id)
    try:
        req = requests.get(url,
                           cookies=self.config.cookies,
                           headers=self.config.headers,
                           timeout=self.config.timeout)
        # Bug fix: json.loads(..., encoding=...) was ignored on Python 3
        # and removed in 3.9 (TypeError); parse the response text directly.
        data = json.loads(req.text)
        if data['message'] != 'success':
            logging.info('do not success when request user page!')
            return None
        user = User(user_id)
        user.followers = int(data['user_info']['followers_count'])
        user.watchers = int(data['user_info']['following_count'])
        user.des = data['user_info']['description']
        user.avatar_url = data['user_info']['avatar_url']
        user.nickname = data['user_info']['name']
        # Consistency fix: store a bool in both branches (the API encodes
        # verification as the string 'true'); 1 == True so callers that
        # compared against 1 still work.
        user.is_pro = data['user_info']['user_verified'] == 'true'
        return user
    except (requests.Timeout, requests.HTTPError, requests.ConnectionError):
        logging.error('failed to request user page = {}'.format(url))
        return None
    except json.JSONDecodeError:
        logging.error(
            'cannot decode {} response data to json object'.format(url))
        return None
    except KeyError as e:
        logging.error('cannot parse user info. reason:{}'.format(e))
        return None
def output(cohort, metric):
    """ View corresponding to a data request - All of the setup and
        execution for a request happens here. """
    # global_id numbers queued jobs across requests.
    global global_id
    url = request.url.split(request.url_root)[1]

    # Check for refresh flag - drop from url
    refresh = True if 'refresh' in request.args else False
    if refresh:
        url = sub(REFRESH_REGEX,'',url)

    # Get the refresh date of the cohort
    try:
        cid = get_cohort_id(cohort)
        cohort_refresh_ts = get_cohort_refresh_datetime(cid)
    except Exception:
        cohort_refresh_ts = None
        logging.error(__name__ + '::Could not retrieve refresh '
                                 'time of cohort.')

    # Build a request. Populate with request parameters from query args.
    rm = RequestMetaFactory(cohort, cohort_refresh_ts, metric)
    for param in REQUEST_META_QUERY_STR:
        if param in request.args and hasattr(rm, param):
            if not request.args[param]:
                # Assign a value indicating presence of a query var
                setattr(rm, param, DEFAULT_QUERY_VAL)
            else:
                setattr(rm, param, request.args[param])

    # Process defaults for request parameters
    try:
        process_request_params(rm)
    except MetricsAPIError as e:
        # NOTE(review): e.message is Python-2-only.
        return redirect(url_for('cohorts') + '?error=' + e.message)

    # Determine if the request maps to an existing response. If so return it.
    # Otherwise compute.
    data = get_data(rm, pkl_data)
    if data and not refresh:
        return data
    else:
        # Ensure that the job for this url is not already running
        is_pending_job = False
        for p in processQ:
            # NOTE(review): cmp() is Python-2-only; equality of request
            # metadata is tested via cmp(...) == 0.
            if not cmp(rm, p.request) and p.status[0] == 'pending':
                is_pending_job = True

        if not is_pending_job:
            # Queue the job
            q = mp.Queue()
            p = mp.Process(target=process_metrics, args=(q, rm))
            p.start()

            global_id += 1
            logging.info(__name__ +
                         '::Appending request %s to the queue...' % rm)
            processQ.append(QStructClass(global_id,p,rm,url,q,['pending']))

            return render_template('processing.html', url_str=str(rm))
        else:
            return redirect(url_for('job_queue') + '?error=0')