def main():
    """Entry point for the EOSIO testcase runner.

    Parses the --config argument, validates the working directory and
    the config file, then runs the whole testcase collection.

    Returns:
        The result of run_monster() (truthy on success), or None when
        an unexpected exception was caught and logged.
    """
    parser = argparse.ArgumentParser(
        description='EOSIO testcase collections running tool.')
    parser.add_argument('--config', default="./config.json", type=str,
                        help='config.json config file path')
    args = parser.parse_args()
    conf_file = os.path.abspath(os.path.expanduser(args.config))

    # Check the parameters: the tool must run from the eostestmonster
    # directory so the relative 'testcases' path resolves.
    if not os.path.exists('testcases'):
        logger.error('call startmonster.py in the eostestmonster directory')
        sys.exit(1)

    conf_dict = None
    with open(conf_file, 'r') as fp:
        conf_dict = json.loads(fp.read())
    if not conf_dict:
        logger.error('validator config can not be empty: {}'.format(conf_file))
        sys.exit(1)
    if not check_exist_acct(conf_dict):
        logger.error('ERROR: the exist_account is NOT available')
        sys.exit(1)

    setup_env()

    # Start the testcase
    try:
        time_start = time.time()
        result = run_monster(conf_dict)
        if not result:
            logger.error('!!! Call testcases FAILED !!!')
        else:
            logger.info('Call testcases SUCCESS !!!')
        time_usage = time.time() - time_start
        logger.info('TIME USAGE:%ss' % (time_usage,))
        return result
    except Exception:
        # BUG FIX: traceback.print_exc() prints to stderr and returns
        # None, so the original line logged the literal string "None".
        # format_exc() returns the traceback text for the log record.
        logger.error(traceback.format_exc())
def poll(self):
    """Run one autoscaling pass over every managed deployment.

    Reads the visible and in-flight (invisible) SQS message counts once,
    then for each deployment decides whether to scale up or down,
    honouring per-deployment cool-down timestamps stored in
    self.last_scale_up_time / self.last_scale_down_time.  Sleeps for
    poll_period at the end of the pass.
    """
    message_count, invisible_message_count = self.message_counts()
    t = time()  # one timestamp reused for every cool-down comparison below
    for deployment in self.deployments():
        name = deployment.metadata.name
        logger.info("Checking deployment %s", name)
        # Scale up when the backlog is large enough and this
        # deployment's scale-up cool-down window has elapsed.
        if message_count >= self.options.scale_up_messages:
            if t - self.last_scale_up_time[
                    name] > self.options.scale_up_cool_down:
                self.scale_up(deployment)
                self.last_scale_up_time[name] = t
            else:
                logger.debug("Waiting for scale up cooldown")
        if message_count <= self.options.scale_down_messages:
            # special case - do not scale to zero unless there are no invisible messages
            if (invisible_message_count > 0
                    and deployment.spec.replicas <= invisible_message_count):
                logger.debug(
                    "Not scaling down because messages are still in-flight"
                )
            elif t - self.last_scale_down_time[
                    name] > self.options.scale_down_cool_down:
                self.scale_down(deployment)
                self.last_scale_down_time[name] = t
            else:
                # Only log the cooldown wait when a scale-down is
                # actually possible (replicas above the floor).
                if deployment.spec.replicas > self.options.min_pods:
                    logger.debug("Waiting for scale down cooldown")

    # code for scale to use msg_count
    sleep(self.options.poll_period)
def access_auth(self, username, password):
    """Authenticate a user against the stored user record.

    :param username: account name entered by the user
    :param password: password entered by the user
    :return: dict with keys ``status`` (bool), ``msg`` (str) and, on
        success, ``user_data`` (the stored record)
    """
    result = {'status': False, 'msg': None}

    record = self.db_handle.load_data(username)
    if not record:
        # No data file found for this user.
        result['msg'] = '找不到该文件'
        print('找不到该文件')
        return result

    if record['status'] == 1:  # 1 == frozen account
        result['msg'] = '该用户被冻结'
        return result

    if record['password'] != password:
        result['msg'] = '密码错误'
        return result

    # Reject accounts whose expiry date has passed.
    expires_at = time.mktime(
        time.strptime(record['expire_date'], '%Y-%m-%d'))
    if time.time() > expires_at:
        result['msg'] = '该用户已经过期'
        return result

    result.update(status=True, msg='登录成功', user_data=record)
    logger.info('%s login success' % (username))
    return result
def run(options):
    """Start the autoscaler.

    poll_period and sqs_queue_url are set as part of the k8s
    deployment's environment variables.
    """
    logger.info("Starting autoscaler")
    poller = SQSPoller(options)
    poller.run()
def access_login(self):
    """Interactive login loop.

    Prompts for credentials until authentication succeeds.  After
    three failed attempts for the same account the account is frozen
    and the loop ends (returning None).

    :return: the authenticated user-info dict, or None if frozen out.
    """
    failed_attempts = {}
    while True:
        username = input('输入你的用户名:').strip()
        password = input('请输入你的密码:').strip()

        # Check whether the account exists at all.
        if not self.user_register(username):
            print('该用户不存在')
            continue

        user_info = self.access_auth(username, password)
        if user_info['status']:
            return user_info

        # Failed login: report it and count the attempt.
        logger.info('{}{}'.format(username, user_info['msg']))
        print(user_info['msg'])
        failed_attempts[username] = failed_attempts.get(username, 0) + 1
        if failed_attempts[username] >= 3:
            self.frozen_user(username)
            return None
def get_followers(self, uid):
    """Fetch all followers of user *uid*, page by page.

    :param uid: numeric user id to fetch followers for
    :return: the processed follower list from handle_followers(), or
        [] when the first request fails
    :raises ValueError: when the API response has no maxPage field
    """
    size = 1000
    url = urljoin(BASE_URL, FOLLOWERS_URL)
    params = {
        'size': size,
        'pageNo': 1,
        'uid': uid,
        '_': int(time.time() * 1000)
    }
    respond = self._request(url, params=params)
    if not respond:
        return []
    data = respond.json()
    max_page = data.get('maxPage')
    if not max_page:
        logger.error("获取粉丝失败")
        logger.error(data)
        raise ValueError("获取粉丝失败")
    result = data['followers']
    # BUG FIX: page 1 was already fetched above, but the original loop
    # `range(1, max_page)` fetched page 1 again and never fetched page
    # max_page.  Fetch pages 2..max_page instead.
    # (Assumes the API pages are 1-indexed, as the initial pageNo=1
    # request suggests — TODO confirm.)
    for page in range(2, max_page + 1):
        time.sleep(FOLLOWER_PAGE_INTEVAL)
        logger.info('开始抓取第%s页的粉丝' % page)
        params['pageNo'] = page
        params['_'] = int(time.time() * 1000)
        respond = self._request(url, params=params)
        if not respond:
            continue  # best-effort: skip a failed page, keep the rest
        data = respond.json()
        result += data['followers']
    return self.handle_followers(result)
def post(self, msg, audience=None):
    """Post a broadcast *msg*, @-mentioning every name in *audience*.

    :param msg: text of the broadcast
    :param audience: optional list of user names to mention
    :raises ValueError: when the server rejects the post
    """
    # BUG FIX: mutable default argument `audience=[]` replaced by None.
    if audience is None:
        audience = []
    p = {"api": "/statuses/update.json", "_": int(time.time() * 1000)}
    cookie = self.load_cookies()
    url = urljoin(BASE_URL, TOKEN_URL)
    r = self.session.get(url, params=p, cookies=cookie, headers=BASE_HEADER)
    try:
        token = r.json()['token']
    except (KeyError, IndexError, TypeError, ValueError):
        # BUG FIX: a JSON body without a 'token' key raises KeyError,
        # which the original except clause did not catch.
        logger.error("MLGB 出错了!")
        logger.error("\n%s\n", r.text)
        return
    # BUG FIX: with an empty audience the original still appended a
    # dangling " @" to the message; only add mentions when present.
    if audience:
        mentions = ' @' + ' @'.join(audience).strip()
        msg = '%s %s' % (msg, mentions)
    logger.info('发送的内容是: %s' % msg)
    # (removed the no-op `msg = msg.encode().decode()` round-trip)
    data = {"status": "<p>%s</p>" % msg, "session_token": token}
    url = urljoin(BASE_URL, POST_URL)
    r = self.session.post(url, data=data, cookies=cookie, headers=BASE_HEADER)
    if r.status_code == 200:
        data = r.json()
        # BUG FIX: `not data.get('error_code') > -1` raised TypeError
        # when 'error_code' was absent (None > -1).  A missing
        # error_code is treated as success — presumably the API omits
        # it on success; TODO confirm against the endpoint.
        if data.get('error_code', -1) <= -1:
            logger.debug("完事儿了.")
            return
    logger.error("MLGB 又出错了!")
    logger.error("\n%s\n", r.text)
    raise ValueError('发广播出错了')
def post():
    """Broadcast every configured message from every configured account.

    Sleeps PERPOST_INTEVAL between individual posts to pace requests.
    """
    for message in POST_MESSAGE:
        for account in ACCOUNTS:
            logger.info('使用账户: %s' % account[0])
            # account[0] = user name, account[1] = password
            _post(account[0], account[1], message)
            time.sleep(PERPOST_INTEVAL)
    logger.info('全部广播发送完毕')
def scale_down(self, deployment):
    """Remove one replica from *deployment*, never going below min_pods.

    If the deployment is somehow already below min_pods, delegate to
    scale_up so it grows back toward the floor.
    """
    replicas = deployment.spec.replicas
    if replicas > self.options.min_pods:
        deployment.spec.replicas = replicas - 1
        logger.info("Scaling down to %d" % deployment.spec.replicas)
        self.update_deployment(deployment)
    elif replicas < self.options.min_pods:
        self.scale_up(deployment)
    else:
        logger.debug("Min pods reached")
def test_cases(self, tdata):
    """Data-driven API test: run one Excel row described by *tdata*.

    Tracks the current Excel row/sheet via module-level globals,
    resolves {var} / {{extracted_var}} placeholders in host and
    headers, fires the request and validates the response against the
    row's checkpoint, writing the verdict back into the workbook.

    :param tdata: dict of one Excel row (keys are Chinese column names:
        请求头信息=headers, 请求入参=params, 接口url=path, 请求方式=method,
        检查点=checkpoint, Host, 提取变量=variables to extract)
    """
    # Row/sheet cursors are shared across invocations via globals.
    global row
    global sheet_index
    if row < self.excel_data[sheet_index] + 1:
        row += 1
    else:
        # Sheet exhausted: move to the next sheet, data starts at row 2.
        row = 2
        sheet_index += 1
    headers, params, apiUrl, method, checkkpoint, host = tdata[
        '请求头信息'], tdata['请求入参'], tdata['接口url'], tdata['请求方式'], tdata[
            '检查点'], tdata["Host"]
    extract = tdata['提取变量']
    '''解析host,如有变量就替换,例如{host}'''
    # Resolve a "{name}" host placeholder from the basic config.
    if isinstance(host, str) and host.startswith("{") and host.endswith("}"):
        value = host.split("{")[1].split("}")[0]
        host = read_basic_config.variables(value)
    url = host + apiUrl
    '''解析headers,替换头部信息包含上个接口提取的变量,例如{{token}}'''
    # "{{name}}" -> value extracted from a previous API call;
    # "{name}"  -> value from the basic config.
    if headers:
        # SECURITY NOTE(review): eval() on spreadsheet content executes
        # arbitrary code; acceptable only for trusted test workbooks.
        headers = eval(headers)
        for key, value in headers.items():
            if isinstance(value, str) and value.startswith(
                    "{{") and value.endswith("}}"):
                value = value.split("{{")[1].split("}}")[0]
                replace = read_extract_config.extract(value)
                headers[key] = replace
            elif isinstance(
                    value,
                    str) and value.startswith("{") and value.endswith("}"):
                value = value.split("{")[1].split("}")[0]
                replace = read_basic_config.variables(value)
                headers[key] = str(replace)
    '''开始请求接口,返回接口响应值'''
    # GET/DELETE send query params; other methods send a request body.
    client = HTTPClient(url=url, method=method, headers=headers)
    if method == 'get' or method == 'delete':
        res = client.send(params=params, extract=extract, count=row,
                          sheet_index=sheet_index)
    else:
        res = client.send(data=params, extract=extract, count=row,
                          sheet_index=sheet_index)
    '''校验excel中的检查点:1.字段的值和respone中的值相等 2.字段的值不为空'''
    if checkkpoint:
        try:
            # SECURITY NOTE(review): eval() again — see note above.
            checkkpoint = eval(checkkpoint)
        except NameError:
            from logs.log import logger
            logger.info('!!!!!预期结果不是Json格式!!!!!')
        # result[0] = pass/fail bool, result[1] = failure message.
        result = is_json_contains(res, checkkpoint)
        Excel().write_result(sheet_index, row, result[0])
        Excel().write_fail_message(sheet_index, row, result[1])
        #Excel().write_number(row)
        self.assertTrue(result[0], result[1])
def send_chat_msg():
    """Send every configured chat message from every configured account.

    Accounts that fail with ValueError (e.g. login failure) are
    skipped; the run continues with the next account.
    """
    for message in CHAT_MESSAGE:
        for account in ACCOUNTS:
            logger.info('使用账户: %s' % account[0])
            # account[0] = user name, account[1] = password
            try:
                _send_chat_msg(account[0], account[1], message)
            except ValueError:
                continue
    logger.info('全部聊天发送完毕')
def run(self):
    """Main loop: expose Prometheus metrics and poll SQS forever.

    Starts the metrics HTTP server once, then alternates poll() and
    update_metrics() every poll_period seconds.  Never returns.
    """
    options = self.options
    start_http_server(options.prometheus_port)
    logger.debug("Starting poll for {} every {}s".format(
        options.sqs_queue_url, options.poll_period))
    # BUG FIX: the log line hard-coded port 9095 even though the
    # exporter is actually started on options.prometheus_port.
    logger.info("Started metrics exporter at port {}".format(
        options.prometheus_port))
    while True:
        self.poll()
        self.update_metrics()
        sleep(options.poll_period)
def scale_down(self):
    """Shrink the deployment by one replica, never below min_pods.

    If the deployment is somehow below min_pods, delegate to
    scale_up() so it grows back toward the floor.
    """
    deployment = self.deployment()
    current = deployment.spec.replicas
    if current > self.options.min_pods:
        logger.info("Scaling Down")
        deployment.spec.replicas = current - 1
        self.update_deployment(deployment)
    elif current < self.options.min_pods:
        self.scale_up()
    else:
        logger.info("Min pods reached")
def scale_up(self):
    """Grow the deployment by one replica, never above max_pods.

    If the deployment is somehow above max_pods, delegate to
    scale_down() so it shrinks back toward the ceiling.
    """
    deployment = self.deployment()
    current = deployment.spec.replicas
    if current < self.options.max_pods:
        logger.info("Scaling up")
        deployment.spec.replicas = current + 1
        self.update_deployment(deployment)
    elif current > self.options.max_pods:
        self.scale_down()
    else:
        logger.info("Max pods reached")
def frozen_user(self, username):
    """Freeze the account *username*.

    Loads the user's record, sets its status flag to 1 (frozen) and
    writes it back; logs only when the write succeeds.

    :param username: account name to freeze
    """
    record = self.db_handle.load_data(username)
    record['status'] = 1  # 1 == frozen
    if self.db_handle.dump_data(username, record):
        logger.info('%s 被冻结了' % username)
def GetObject(id, **kwargs):
    """Look up object *id* in the XML-configured application context.

    :param id: object id declared in config.xml
    :return: the configured object, or None when the context cannot be
        built or the id is unknown (the error is logged with traceback)
    """
    try:
        config_path = os.path.join(GloalConfig().config, 'config.xml')
        context = ApplicationContext(XMLConfig(config_path))
        obj = context.get_object(id)
        logger.info(obj)
        return obj
    except Exception:
        # BUG FIX: bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; catch Exception instead.
        logger.error("", exc_info=1)
        return None
def _post(user_name, password, msg):
    """Log in as *user_name* and broadcast *msg*, mentioning the next
    batch of up to 7 people after the saved post cursor, then advance
    and persist the cursor.

    :param user_name: spider account name
    :param password: spider account password
    :param msg: broadcast text
    """
    spider = Spider(user_name=user_name, password=password)
    spider.visit_index()
    spider.login()
    post_obj = Post.get()
    logger.info(u'开始发送广播信息,从id为%s的开始' % post_obj.post_id)
    people = People.select().where(People.id > post_obj.post_id).limit(7)
    people = [_ for _ in people]
    # BUG FIX: when no people remain past the cursor, the original
    # crashed with IndexError on people[-1]; just return instead.
    if not people:
        return
    audience = [_.user_name for _ in people]
    spider.post(msg, audience)
    post_obj.post_id = people[-1].id
    post_obj.save()
def dump_data(self, username, userinfo):
    """Persist *userinfo* to the user's JSON file.

    Only overwrites an existing file — a missing file is logged and
    None is returned (presumably registration creates the file;
    TODO confirm).

    :param username: account name (also the file name)
    :param userinfo: dict to serialise
    :return: True on success, None when the file does not exist
    """
    data_path = '%s/%s.json' % (self.db_path, username)
    if not os.path.isfile(data_path):
        logger.info('获取用户文件错误')
        return None
    with open(data_path, 'w', encoding='utf-8') as fh:
        fh.write(json.dumps(userinfo))
    return True
def load_cookies(self):
    """Load this account's cookies from the pickled session file.

    Also caches the account's uid on self.uid when a session exists.

    :return: the cookie dict, or {} when the session file is empty or
        holds no entry for this account
    """
    # BUG FIX: pickle data is binary — the original opened the file in
    # text mode, which makes pickle.load() fail on Python 3.
    with open('spiders/.session', 'rb') as f:
        try:
            data = pickle.load(f)
        except EOFError:
            # Empty session file: nobody is logged in yet.
            return {}
    result = data.get(self.user_name)
    if not result:
        logger.info("账户未登录")
        return {}
    self.uid = result['uid']
    cookies = result['cookies']
    return cookies
def load_data(self, username):
    """Read the user's JSON record from disk.

    :param username: account name (also the file name)
    :return: the record dict, or None when the file is missing or the
        decoded content is falsy
    """
    data_path = '%s/%s.json' % (self.db_path, username)
    if not os.path.isfile(data_path):
        logger.info('获取用户文件错误')
        return None
    with open(data_path, 'r', encoding='utf-8') as fh:
        record = json.loads(fh.read())
    return record if record else None
def scale_up(self, pods):
    """Perform scale up.

    Pods should never go above max pods and we shouldn't try to scale
    if we're already at the desired pod count.

    Args:
        pods: int, desired pod count
    """
    target = min(pods, self.options.max_pods)
    if self.replicas == target:
        logger.info("Max pods reached")
    else:
        logger.info(f"Scaling up to {target}")
        self.deployment.spec.replicas = target
        self.update_deployment()
def scale_down(self, pods):
    """Perform scale down.

    Pods should never go below min pods and we shouldn't try to scale
    if we're already at the desired pod count.

    Args:
        pods: int, desired pod count
    """
    target = max(pods, self.options.min_pods)
    if self.replicas == target:
        logger.info("Min pods reached")
    else:
        logger.info(f"Scaling down to {target}")
        self.deployment.spec.replicas = target
        self.update_deployment()
def import_class(import_str):
    """Import the module named in *import_str* and return an INSTANCE
    of its trailing class (note: instantiates, despite the name).

    .. versionadded:: 0.3

    :raises ImportError: when the class is not found in the module
    """
    mod_str, _sep, class_str = import_str.rpartition('.')
    logger.info("__import__")
    __import__(mod_str)
    logger.info("__import__ end")
    try:
        return getattr(sys.modules[mod_str], class_str)()
    except AttributeError:
        raise ImportError(
            'Class %s cannot be found (%s)' %
            (class_str, traceback.format_exception(*sys.exc_info())))
def wrapper(self, *args, **kw):
    # Decorator body: logs Begin/End banners and wall-clock duration
    # around the wrapped handler `func` (from the enclosing closure),
    # and converts any uncaught exception into a generic JSON error
    # response instead of letting it propagate.
    who = 'hejianhao'  # NOTE(review): hard-coded author tag used in the banner
    logger.info('<==================== {1} Begin call [{0}] ===================='.format(__get_full_class(self), who))
    start_time = time.time()
    try:
        c = func(self, *args, **kw)
    except Exception as err:
        # Collapse the exception message and traceback into one log
        # entry, then fall back to a catch-all "system error" payload.
        text = '\n'.join(['an error occured ', str(err), traceback.format_exc()])
        logger.error('ATP: 接口发现未知错误 \n {traceback}'.format(traceback=text))
        c = jsonify({"code": "999", "desc": "system error"})
    end_time = time.time()
    d_time = end_time - start_time
    logger.info("==================== End call [{0}], run {1:.3}s ====================>\n"
                .format(__get_full_class(self), d_time))
    return c
def _send_chat_msg(user_name, password, msg):
    """Log in as *user_name* and chat *msg* to the next batch of up to
    two people past the saved chat cursor.

    The cursor (chat_obj.chatting_id) is persisted after every
    successful send, so a later run resumes where this one stopped.
    Sleeps PERCHAT_INTEVAL after each attempt, success or failure.

    :param user_name: spider account name
    :param password: spider account password
    :param msg: chat text
    """
    spider = Spider(user_name=user_name, password=password)
    spider.login()
    chat_obj = Chat.get()
    logger.info(u'开始发送聊天信息,从id为%s的开始' % chat_obj.chatting_id)
    people = People.select().where(People.id > chat_obj.chatting_id).limit(2)
    sent = 0
    for person in people:
        if not spider.chat(person.uid, msg):
            logger.error(u'发送给’%s‘失败' % person.user_name)
            time.sleep(PERCHAT_INTEVAL)
            continue
        sent += 1
        logger.info(u'第%s条消息,发送给’%s‘成功' % (sent, person.user_name))
        chat_obj.chatting_id = person.id
        chat_obj.save()
        time.sleep(PERCHAT_INTEVAL)
def crawl_people_info():
    """Crawl every star account's followers into the People table.

    For each star (item[0]=uid-or-handle, item[1]=display name),
    resolves a non-numeric handle to a numeric uid, fetches all
    followers, bulk-inserts them in chunks of 100 inside a single
    transaction, and finally removes duplicate rows.
    """
    spider = Spider()
    spider.visit_index()
    for item in spider.get_people():
        logger.info(u'开始抓取’%s‘的粉丝' % item[1])
        uid = item[0]
        if not if_int(uid):
            # Resolve a vanity handle to the numeric id; skip on failure.
            uid = spider.get_people_id(uid)
            if not uid:
                continue
        followers = [
            {'uid': f[0], 'user_name': f[1]}
            for f in spider.get_followers(uid)
        ]
        with database.atomic():
            for start in range(0, len(followers), 100):
                People.insert_many(followers[start:start + 100]).execute()
        logger.info(u'总共抓取了%s个粉丝' % len(followers))
        time.sleep(PER_STAR_FOLLOWER_INTEVAL)
    People.remove_duplicate()
def login(self):
    """Log in to the site and persist the session cookies.

    A numeric account name is sent as 'telephone' instead of
    'username'.  Does nothing when already logged in.

    :raises ValueError: when the post-login check still fails
    """
    if self.check_login():
        logger.info('已经登录')
        return
    url = urljoin(BASE_URL, LOGIN_URL)
    payload = {
        'areacode': 86,
        'remember_me': 'on',
        'username': self.user_name,
        'password': self.get_hash(self.password),
    }
    if if_int(self.user_name):
        # Numeric account names are phone numbers for this API.
        payload['telephone'] = payload.pop('username')
    response = self.session.post(url, headers=BASE_HEADER, data=payload)
    logger.debug(response.content)
    if not self.check_login():
        raise ValueError('登录失败')
    logger.info('登录成功')
    # Warm-up lookup with a fixed uid — presumably primes the session
    # before the cookies are saved; TODO confirm why this specific id.
    self.get_people_id('8276760920')
    self.save_cookies()
def scale_up(self):
    """Grow the deployment by one replica (capped at max_pods) and
    optionally drain messages from the SQS queue right afterwards.

    If the deployment is somehow above max_pods, delegate to
    scale_down() so it shrinks back toward the ceiling.
    """
    deployment = self.deployment()
    current = deployment.spec.replicas
    if current < self.options.max_pods:
        logger.info("Scaling up")
        deployment.spec.replicas = current + 1
        self.update_deployment(deployment)
        if self.options.get_messages_from_queue > 0:
            logger.info("Start Polling SQS Message...")
            self.get_messages_from_queue()
        else:
            logger.info("Skip Polling SQS Message...")
    elif current > self.options.max_pods:
        self.scale_down()
    else:
        logger.info("Max pods reached")
def exec(self, api, endpoint, body=None, _ret_cnt=0):
    """ Execute a method against eosd RPC.

    Warnings:
        This command will auto-retry in case of node failure, as well
        as handle node fail-over, unless we are broadcasting a
        transaction. In the latter case, the exception is **re-raised**.

    :param api: API group name (path segment)
    :param endpoint: endpoint name within the API group
    :param body: optional request body; presence switches GET -> POST
    :param _ret_cnt: internal retry counter, do not set from callers
    """
    url = f"{self.node_url}/{self.api_version}/{api}/{endpoint}"
    body = self._body(body)
    method = 'POST' if body else 'GET'
    try:
        response = self.http.urlopen(method, url, body=body)
    except (MaxRetryError, ConnectionResetError, ReadTimeoutError,
            RemoteDisconnected, ProtocolError) as e:
        if _ret_cnt >= self.max_retries:
            raise e
        # Linear back-off, then fail over to the next node.
        # NOTE(review): the first retry sleeps 0s (_ret_cnt == 0) —
        # confirm whether an immediate retry is intended.
        time.sleep(_ret_cnt)
        self.next_node()
        # BUG FIX (consistency): this one message went through the root
        # `logging` module while every other message here uses `logger`.
        logger.debug('Switched node to %s due to exception: %s' %
                     (self.hostname, e.__class__.__name__))
        return self.exec(api, endpoint, body, _ret_cnt=_ret_cnt + 1)
    except Exception as e:
        extra = dict(err=e, url=url, body=body, method=method)
        logger.info('Request error', extra=extra)
        raise e
    else:
        return self._return(response=response, body=body)
def _return(response=None, body=None):
    """ Process the response status code and body (json).

    Note:
        If re_raise flag is set, this method will raise an exception
        instead of returning None.

    Exceptions:
        EosdNoResponse on no response.
        HttpAPIError on non-200 response.

    Returns:
        Parsed response body, or the raw decoded string when the body
        is not valid JSON (see the JSONDecodeError branch below).
    """
    # NOTE(review): no `self` parameter — presumably declared as a
    # @staticmethod at the definition site; confirm.
    if not response:
        raise EosdNoResponse(
            'eosd nodes have failed to respond, all retries exhausted.')
    result = response.data.decode('utf-8')
    if response.status != 200 or not result:
        extra = dict(result=result, response=response, request_body=body)
        logger.info('non ok response: %s', response.status, extra=extra)
        raise HttpAPIError(response.status, result)
    try:
        response_json = json.loads(result)
    except JSONDecodeError as e:
        # Malformed JSON is only logged; the raw string falls through
        # and is returned unchanged (best-effort, no raise).
        extra = dict(response=response, request_body=body, err=e)
        logger.info('failed to parse response', extra=extra)
    else:
        result = response_json
    return result
def clear_log():
    """Delete the spider log file.

    A missing file is treated as already cleared rather than an error.
    """
    try:
        os.remove('logs/.spider.log')
    except FileNotFoundError:
        # BUG FIX (robustness): clearing an already-absent log used to
        # crash with FileNotFoundError.
        pass
    logger.info('日志清除完毕')
def remove_chat_history():
    """Reset the saved chat cursor back to the beginning."""
    chat = Chat.get()
    chat.chatting_id = 0
    chat.save()
    logger.info(u'已经清空了发送历史')
def remove_post_history():
    """Reset the saved broadcast cursor back to the beginning."""
    post = Post.get()
    post.post_id = 0
    post.save()
    logger.info(u'已经清空了发送广播历史')
def get_people(self):
    """Fetch the star-people page and parse it.

    :return: the list produced by the module-level get_people() parser
    """
    page_url = urljoin(BASE_URL, PEOPLE_URL)
    respond = self.session.get(page_url, headers=BASE_HEADER)
    # Delegates to the module-level parser of the same name.
    people = get_people(respond.content)
    logger.info('抓取了%s个大V' % len(people))
    return people
def remove_all_people():
    """Delete every crawled follower from the People table."""
    People.remove_all()
    logger.info('粉丝删除完毕')
def remove_login():
    """Log out by clearing all saved spider cookies."""
    Spider.clear_cookies()
    logger.info('登出完毕')