def _cmd_hexists_handler(self):
    """Handle HEXISTS: report whether *field* is present in hash *key*."""
    hash_key = self.parse_string_val()
    hash_field = self.parse_string_val()
    code, val = self._memory.hexists(hash_key, hash_field)
    logger.debug("HExists: %s => %s, %s" % (hash_key, code, val))
    return code, val
def on_connect(self, client, userdata, flags, rc):
    """paho-mqtt connect callback.

    On success (rc == 0) builds the channel topic from settings, subscribes
    to it (if enabled) and fires the optional user-supplied connect callback.
    Any known failure code (1-5) or exception kills the whole process.
    """
    try:
        if rc == 0:
            logger.debug("Mqtt - Connected , result code {}".format(rc))
            # Wildcard topic covering every channel of this client.
            topic = "/{api_version}/{mqtt_topic}/{client_id}/channels/#".format(
                mqtt_topic=settings.mqtt_topic,
                api_version=settings.api_version,
                client_id=settings.client_id)
            if self.subscribe:
                logger.notice("Mqtt - Will subscribe to {}".format(topic))
                self.mqtt_client.subscribe(topic, qos=0)
            # Optional hook registered elsewhere via set_on_connect_callback
            # (presumably) — invoked with its stored kwargs.
            if self._on_connect_callback:
                self._on_connect_callback.__call__(
                    **self._on_connect_callback_params)
        elif 0 < rc < 6:
            # RC_LIST maps paho return codes 1-5 to human-readable errors.
            raise Exception(RC_LIST[rc])
    except Exception:
        logger.error("Mqtt Exception- {}".format(
            traceback.format_exc(limit=5)))
        # Hard exit: a broken MQTT connection makes the manager useless.
        os._exit(1)
def _cmd_lpop_handler(self):
    """Handle LPOP: pop and return the head element of list *key*."""
    list_key = self.parse_string_val()
    code, popped = self._memory.lpop(list_key)
    logger.debug("LPOP: %s => %s:%d" % (list_key, popped, code))
    return code, popped
def get_access():
    """Request OAuth2 authorization from the Muzzley platform.

    Posts the manager's client credentials to ``settings.auth_url``; on
    HTTP 200 stores the returned token payload and starts the refresher.

    Raises:
        Exception: re-raised after logging, on any non-200 response or
        transport error.
    """
    logger.verbose("Trying to authorize with Muzzley...")
    data = {
        "client_id": settings.client_id,
        "client_secret": settings.client_secret,
        "response_type": settings.grant_type,
        "scope": settings.scope,
        "state": "active"
    }
    url = settings.auth_url
    try:
        logger.debug("Initiated POST - {}".format(url))
        resp = requests.post(url, data=data)
        if resp.status_code == 200:
            # Fixed typo in log message: "succesfully" -> "successfully".
            logger.notice("Manager successfully Authorized with Muzzley")
            store_info(resp.json())
            start_refresher()
        else:
            error_msg = format_response(resp)
            raise Exception(error_msg)
    except Exception:
        logger.alert("Unexpected error during authorization {}".format(
            traceback.format_exc(limit=5)))
        raise
def patch_custom_endpoints(self):
    """PATCH the platform webhook with this manager's custom quote-action URIs.

    Builds one ``quote_actions`` mapping from every configured custom
    endpoint, then sends a single PATCH with all of them.
    """
    try:
        custom_endpoints = settings.custom_endpoints
        url = settings.webhook_url
        data = {'quote_actions': {}}
        for endpoint in custom_endpoints:
            # namespace -> fully-qualified public URI for that endpoint.
            data['quote_actions'].update({
                endpoint['namespace']:
                    f"{settings.schema_pub}://{settings.host_pub}/"
                    f"{settings.api_version}{endpoint['uri']}"
            })
        if data['quote_actions']:
            logger.debug(f"[patch_custom_endpoints] Initiated PATCH - {url}")
            logger.verbose("\n{}\n".format(json.dumps(data, indent=4, sort_keys=True)))
            resp = requests.patch(url, data=json.dumps(data), headers=self.session.headers)
            # NOTE(review): `endpoint` here is the loop variable after the
            # loop ended, so these messages name only the LAST endpoint even
            # though the PATCH carried all of them — confirm intent.
            logger.verbose("[patch_{}] Received response code[{}]".format(endpoint['namespace'], resp.status_code))
            logger.verbose("\n{}\n".format(json.dumps(resp.json(), indent=4, sort_keys=True)))
            if int(resp.status_code) == 200:
                logger.notice(f"[patch_custom_endpoints] {endpoint['namespace']} setup successful!")
            else:
                raise Exception(f"[patch_custom_endpoints] {endpoint['namespace']} setup not successful!")
    except Exception:
        logger.alert("Failed at patch endpoint! {}".format(traceback.format_exc(limit=5)))
        raise
def on_publish(self, client, userdata, mid):
    """paho-mqtt publish callback: log the broker's acknowledgement."""
    logger.debug(
        "\n\n\n\n\n\t\t\t\t\t******************* ON PUBLISH ****************************"
    )
    logger.verbose(
        "Mqtt - Publish acknowledged by broker, mid({}) userdata={}.".
        format(mid, userdata))
def __init__(self, client_id=None, access_token=None, implementer=None,
             queue=None, queue_pub=None, subscribe=True, **kwargs):
    """Build the paho MQTT client and wire its callbacks.

    Every callback defaults to the method defined on this class but may be
    replaced by passing the same-named keyword argument.
    """
    logger.debug("Mqtt - Init")
    self.mqtt_client = paho.Client()
    self.mqtt_client.enable_logger()
    for hook in ('on_connect', 'on_subscribe', 'on_message',
                 'on_disconnect', 'on_publish'):
        setattr(self.mqtt_client, hook, kwargs.get(hook, getattr(self, hook)))
    self._topics = []
    self._on_connect_callback = None
    self._on_connect_callback_params = {}
    # Fall back to configuration when the caller supplied nothing.
    self.client_id = client_id or settings.client_id
    self.access_token = access_token or settings.block["access_token"]
    self.db = get_redis()
    self.implementer = implementer
    self.queue = queue
    self.queue_pub = queue_pub
    self.subscribe = subscribe
def handle_request(self, request):
    """Run the implementer's downstream handler and publish its results.

    The implementer may return a single tuple or a list of tuples of the
    form ``(case, data[, _, custom_mqtt])``; each result is published via
    the tuple's own MQTT client when present, otherwise via the shared
    publish queue.
    """
    logger.debug(
        "\n\n\n\n\n\t\t\t\t\t*******************HANDLE_REQUEST****************************"
    )
    logger.info(f"Request {request}")
    downstream_result = self.implementer.downstream(request)
    # Normalize: a single tuple becomes a one-element list.
    downstream_list = downstream_result if type(
        downstream_result) == list else [downstream_result]
    for downstream_tuple in downstream_list:
        try:
            case = downstream_tuple[0]
            data = downstream_tuple[1]
            if case is not None and data is not None:
                try:
                    # Index 3 optionally carries a dedicated MQTT client;
                    # IndexError (absent) or AttributeError (None / wrong
                    # type) falls back to the queue.
                    custom_mqtt = downstream_tuple[3]
                    custom_mqtt.publisher(io="iw", data=data, case=case)
                except (IndexError, AttributeError):
                    self.queue.put({
                        "io": "iw",
                        "data": data,
                        "case": case
                    })
def handle_channel_requests(self, client_id, owner_id, channel_template, paired_devices):
    """Create channels for the owner's paired devices and store credentials.

    Returns the list of created channels (``[{'id': ...}, ...]``) and the
    (possibly refreshed) credentials dict.
    """
    logger.debug(
        "\n\n\n\n\n\t\t\t\t\t*******************HANDLE_CHANNEL_REQUEST****************************"
    )
    logger.info(
        f"Client_id {client_id}; Owner_id: {owner_id}; Channel_template: {channel_template}; "
        f"Paired_devices: {paired_devices}")
    credentials = self.db.get_credentials(client_id, owner_id)
    channels = []
    if paired_devices:
        # Channel-creation requests run concurrently on a private loop.
        loop = asyncio.new_event_loop()
        responses = loop.run_until_complete(
            self.send_channel_requests(paired_devices, client_id, owner_id,
                                       channel_template, credentials))
        channels = [{
            "id": channel_id
        } for channel_id in responses if channel_id]
        loop.close()
    if channels:
        if settings.config_refresh.get('enabled') is True:
            ignore_keys = []
            old_credentials = {}
            channel_id = None
            for channel in channels:
                channel_id = channel['id']
                # Refresh + re-stamp expiration on the new credentials.
                credentials = self.implementer.auth_response(credentials)
                credentials = self.implementer.update_expiration_date(credentials)
                old_credentials = self.implementer.auth_response(
                    self.db.get_credentials(client_id, owner_id, channel_id))
                if 'client_man_id' not in old_credentials:
                    credentials, has_error = self.implementer.check_manager_client_id(
                        owner_id, channel_id, credentials, old_credentials)
                else:
                    # Carry the manager client id over from the stored creds.
                    credentials['client_man_id'] = old_credentials['client_man_id']
                self.db.set_credentials(credentials, client_id, owner_id, channel_id)
                ignore_keys.append(
                    f'credential-owners/{owner_id}/channels/{channel_id}'
                )
            # NOTE(review): uses the values from the LAST loop iteration —
            # confirm that handle_credentials is meant to run once, not per
            # channel.
            self.thread_pool.add_task(handle_credentials, credentials,
                                      old_credentials, client_id, owner_id,
                                      channel_id, ignore_keys)
        else:
            for channel in channels:
                self.db.set_credentials(credentials, client_id, owner_id,
                                        channel['id'])
    logger.info(f"Channels: {channels}")
    return channels, credentials
def patch_endpoints(self):
    """PATCH the platform webhook with this manager's public endpoint URLs.

    Registers the authorize / receive-token / devices-list / select-device
    URIs, then refreshes the confirmation hash.
    """
    try:
        full_host = "{}://{}/{}".format(settings.schema_pub,
                                        settings.host_pub,
                                        settings.api_version)
        data = {
            "authorize": "{}/authorize".format(full_host),
            "receive_token": "{}/receive-token".format(full_host),
            "devices_list": "{}/devices-list".format(full_host),
            "select_device": "{}/select-device".format(full_host)
        }
        url = settings.webhook_url
        logger.debug("Initiated PATCH - {} {}".format(
            url, self.session.headers))
        logger.verbose(format_str(data, is_json=True))
        resp = requests.patch(url, data=json.dumps(data),
                              headers=self.session.headers)
        logger.verbose(
            "[patch_endpoints] Received response code[{}]".format(
                resp.status_code))
        logger.verbose(format_str(resp.json(), is_json=True))
        # NOTE(review): resp.status_code is logged but never validated — a
        # failed PATCH still proceeds to set_confirmation_hash. Confirm.
        self.set_confirmation_hash()
    except Exception:
        logger.alert("Failed at patch endpoints! {}".format(
            traceback.format_exc(limit=5)))
        raise
def _cmd_linsert_handler(self):
    """Handle LINSERT: insert *value* into list *key* at *index*."""
    list_key = self.parse_string_val()
    position = self.parse_int_val()
    element = self.parse_string_val()
    code, element = self._memory.linsert(list_key, position, element)
    logger.debug("LInserts: %s => %s, %d, %s" % (list_key, element, position, code))
    return code, element
def _cmd_lpush_handler(self):
    """Handle LPUSH: prepend *value* to list *key*."""
    list_key = self.parse_string_val()
    element = self.parse_string_val()
    code = self._memory.lpush(list_key, element)
    logger.debug("LPush: %s => %s:%d" % (list_key, element, code))
    return code, None
def __init__(self):
    """Read the watchdog interval from config; stays disabled when absent."""
    self.interval = None
    self.thread = None
    try:
        keep_alive = settings.config_boot['keep_alive']
        self.interval = int(keep_alive)
        logger.debug('[Watchdog] interval {}'.format(self.interval))
    except (KeyError, TypeError, ValueError):
        # Missing or non-numeric keep_alive => watchdog disabled.
        logger.info('[Watchdog] not enabled, keep_alive missing')
def _cmd_rename_handler(self):
    """Handle RENAME: move the value stored at *key* to *newkey*."""
    old_name = self.parse_string_val()
    new_name = self.parse_string_val()
    code = self._memory.rename(old_name, new_name)
    logger.debug("Rename: %s => %s, %s" % (old_name, new_name, code))
    return code, None
def _cmd_hmset_handler(self):
    """Handle HMSET: set multiple hash fields from a serialized mapping."""
    hash_key = self.parse_string_val()
    raw = self.parse_string_val()
    mapping = loads(raw)
    code, result = self._memory.hmset(hash_key, mapping)
    logger.debug("HMSet: %s => %s, %s" % (hash_key, code, mapping))
    return code, result
def _cmd_lrange_handler(self):
    """Handle LRANGE: return elements of list *key* between *start*/*end*."""
    list_key = self.parse_string_val()
    lo = self.parse_int_val()
    hi = self.parse_int_val()
    code, elements = self._memory.lrange(list_key, lo, hi)
    logger.debug("LRange: %s, %s, %s" % (list_key, lo, hi))
    return code, elements
def _read_header_callback(self, buf):
    """Read a packet header: parse the body length and schedule the body read."""
    # Account the received header bytes in the server status counters.
    self._server.status.inc(CacheServer.status_fields[3], len(buf))
    body_length = Protocol.parse_int(buf)
    logger.debug("header size: %d" % body_length)
    self._stream.read_bytes(body_length, self._read_body_callback)
def _cmd_hget_handler(self):
    """Handle HGET: fetch the given fields of hash *key* (serialized result)."""
    hash_key = self.parse_string_val()
    field_list = loads(self.parse_string_val())
    code, result = self._memory.hget(hash_key, field_list)
    logger.debug("HGet: %s => %s, %s" % (hash_key, field_list, code))
    return code, dumps(result)
def execute(self, *args, **options):
    """Fetch a sensor from the Xlink API and print its first command.

    :param options: expects ``apikey``, ``apiuser`` and ``sensorid`` keys.
    """
    apikey = options.get('apikey')
    apiuser = options.get('apiuser')
    sensorid = options.get('sensorid')
    c = XlinkClient(apikey, apiuser)
    r = c.sensor.get(id=sensorid)
    cmd = r.commands[0].cmd
    logger.debug("Cmd got : {}".format(cmd))
    # Fixed: Python 2 `print cmd` statement is a SyntaxError on Python 3
    # (the rest of the file uses Python 3 features such as f-strings).
    print(cmd)
def _cmd_set_handler(self):
    """Handle SET: store *value* at *key* with an expiry and flag."""
    store_key = self.parse_string_val()
    store_val = self.parse_string_val()
    ttl = self.parse_int_val()
    flags = self.parse_int_val()
    self._memory.set(store_key, store_val, ttl, flags)
    logger.debug("Set: %s => %s, %d, %d" % (store_key, store_val, ttl, flags))
    return 1, None
def _cmd_expire_handler(self):
    """Handle EXPIRE: set a time-to-live on *key*."""
    target_key = self.parse_string_val()
    ttl = self.parse_int_val()
    code, result = self._memory.expire(target_key, ttl)
    logger.debug("Expire: %s => %s, %s" % (target_key, code, result))
    return code, result
def _cmd_status_handler(self):
    """Handle STATUS: return server + memory status as pretty-printed JSON."""
    logger.debug("Status: Get")
    combined = {'is_slave': self._server.slave}
    combined.update(self._status.get_status())
    combined.update(self._memory.get_status())
    return 1, dumps(combined, indent=4, sort_keys=True)
def _send_client(self, code, data):
    """Send a result packet to the client (skipped if the stream is closed)."""
    if not self._stream.closed():
        pack = self._build_result(code, data)
        self._stream.write(pack)
        # Account the sent bytes in the server status counters.
        self._server.status.inc(CacheServer.status_fields[4], len(pack))
        logger.debug('Send to client data: %d bytes' % len(pack))
def send(self, what, message=None):
    """Serialize an event and write it to the client socket.

    Fixed: the original declared a mutable default (``message={}``) and
    mutated it, so the injected ``'class'`` key persisted across calls
    that omitted *message*.

    :param what: event type name.
    :param message: optional payload dict; when provided it is still
        updated in place (preserving the original observable behavior).
    """
    if message is None:
        message = {}
    message.update({
        'class': self._event_name(what)
    })
    to_send = json.dumps({
        'type': what,
        'message': message
    })
    logger.debug('%s <- %s' % (self.id, to_send))
    self.socket.send(to_send + '\n')
def validate_channel(self, credential_key):
    """Resolve the channel-template id for a credential key, or False.

    The key has the shape ``credential-owners/<owner>/channels/<channel>``.
    """
    try:
        parts = credential_key.split('/')
        channel_id, owner_id = parts[-1], parts[1]
        return self.implementer.get_channel_by_owner(owner_id, channel_id)
    except Exception:
        logger.debug(
            f'[Polling] Unexpected error: {traceback.format_exc(limit=5)}')
        return False
def _process_data(self, ret_this, ret_last, threshold, r_threshold, title_format, content_format, change_type):
    """Build a report item from this/last quarter figures and persist it."""
    this_end_date = ret_this.get("EndDate")
    quarter_info = """{}年第{}季度""".format(this_end_date.year, self.quarter_map.get(this_end_date.month))
    # Convert to a two-decimal percentage and take the absolute value; the
    # grow/shrink wording was already decided by the caller.
    threshold = abs(self.re_percent_data(threshold))
    r_threshold = abs(self.re_percent_data(r_threshold))
    # Operating revenue converted from yuan to 100-million yuan.
    this_operating_revenue = self.re_money_data(ret_this.get("OperatingRevenue"))
    # Net profit converted from yuan to 10-thousand yuan.
    this_net_profit = self.re_money_data(ret_this.get("NPParentCompanyOwners"))
    last_net_profit = self.re_money_data(ret_last.get("NPParentCompanyOwners"))
    this_basic_EPS = self.re_decimal_data(ret_this.get("BasicEPS"))
    last_basic_EPS = self.re_decimal_data(ret_last.get("BasicEPS"))
    item = dict()
    item['EndDate'] = this_end_date  # end date of the latest quarter
    item['InfoPublDate'] = ret_this.get("InfoPublDate")  # publish date of the latest report
    item['CompanyCode'] = self.company_code
    item['SecuCode'] = self.secu_code
    item['SecuAbbr'] = self.secu_addr
    item['ChangeType'] = change_type
    # Keep the raw indicator value in the generated record as well.
    item['NPParentCompanyOwners'] = ret_this.get("NPParentCompanyOwners")
    title = title_format.format(self.secu_addr, quarter_info, this_net_profit, threshold)
    item['Title'] = title
    item["SourceIds"] = ",".join(sorted([str(ret_this.get("id")), str(ret_last.get("id"))]))
    content = content_format.format(self.secu_addr, quarter_info, self.secu_addr, quarter_info,
                                    this_operating_revenue, r_threshold, this_net_profit,
                                    threshold, this_basic_EPS, last_net_profit, last_basic_EPS)
    item['Content'] = content
    # Fetch the latest price change (ChangePercActual) from stk_quot_idx,
    # joined on InnerCode.
    sql = '''
    SELECT Date, InnerCode, ChangePercActual from stk_quot_idx WHERE InnerCode={} ORDER BY Date desc LIMIT 1; 
    '''.format(self.inner_code)
    dc_client = self._init_pool(self.dc_cfg)
    _ret = dc_client.select_one(sql)
    dc_client.dispose()
    if _ret:
        logger.debug(_ret)
        change_percactual = _ret.get("ChangePercActual")
        item['ChangePercActual'] = change_percactual
    # NOTE(review): original indentation was lost — it is assumed the item
    # is stored even when no quote row was found (without ChangePercActual);
    # confirm against version control.
    logger.info("\n" + pprint.pformat(item))
    # Check whether the record already exists and how far the stored values deviate.
    self._target_init()
    self.check_exist_and_deviation(item, self.target_client)
def get_quarter_info(self, quarter: datetime.datetime):
    """Fetch the latest merged, non-null report row for the given quarter.

    Orders by publish date descending and IfAdjusted ascending (asc =
    ascending, desc = descending) so the newest, least-adjusted row wins.

    NOTE(review): the SQL is built with str.format — acceptable only while
    source_table/company_code/quarter come from trusted internal config,
    never from user input.
    """
    self._juyuan_init()
    sql = '''
    select id, InfoPublDate, EndDate, IfMerged, IfAdjusted, NPParentCompanyOwners, OperatingRevenue, BasicEPS \
from {} where CompanyCode={} and IfMerged=1 \
and NetProfit is not NULL and OperatingRevenue is not null and BasicEPS is not null \
and EndDate = '{}' and IfAdjusted in (1,2) \
ORDER BY InfoPublDate desc, IfAdjusted asc limit 1; 
    '''.format(self.source_table, self.company_code, quarter)
    logger.debug(sql)
    ret = self.juyuan_client.select_one(sql)
    return ret
def _display(self):
    """Log an ASCII rendering of the map: P=owned, g=has resources, .=plain."""
    def glyph(tile):
        if not tile:
            return ' '
        if tile.owner:
            return 'P'
        if tile.resources:
            return 'g'
        return '.'

    for row in self.map:
        logger.debug(''.join(glyph(tile) for tile in row))
def update_credentials(self, new_credentials, old_credentials_list):
    """ Update all credentials in old_credentials_list with new_credentials

    :param new_credentials: dict
    :param old_credentials_list: [{
        'key': ':credential_key',
        'value': :credential_dict
    }, ...]
    :return: (updated_keys, error_keys) — both de-duplicated lists.
    """
    old_credentials_list = self.check_credentials_man_id(old_credentials_list, new_credentials)
    # Keys flagged as erroneous by the man-id check go straight to errors.
    error_keys = [cred_['key'] for cred_ in old_credentials_list if cred_['has_error'] is True]
    old_credentials_list = self.filter_credentials(old_credentials_list, new_credentials.get('client_man_id'))
    updated_credentials = []
    logger.info(f'[TokenRefresher] update_credentials: {len(old_credentials_list)} keys to update')
    for cred_ in old_credentials_list:
        key = cred_['key']
        credentials = cred_['value']
        # Key shape: credential-owners/<owner_id>/channels/<channel_id>
        channel_id = key.split('/')[-1]
        owner_id = key.split('/')[1]
        client_app_id = credentials.get('client_id', credentials.get('data', {}).get('client_id', ''))
        client_man_id = credentials.get('client_man_id')
        # replace client_id in new credentials with current client_app_id and client_man_id
        # to keep consistence with different apps
        new_credentials['client_id'] = client_app_id
        new_credentials['client_man_id'] = client_man_id
        # Cached channel->template relation; fall back to the implementer.
        try:
            channeltemplate_id = self.channel_relations[channel_id]
        except KeyError:
            channeltemplate_id = self.implementer.get_channel_template(channel_id)
        if channeltemplate_id and \
                (settings.config_boot.get('on_pair', {}).get('update_all_channeltemplates', True)
                 or channeltemplate_id == self.channel_template):
            logger.debug(f'[update_credentials] new credentials {key}')
            logger.info(f"[update_credentials] client_app_id: {client_app_id}; owner_id: {owner_id}; "
                        f"channel_id: {channel_id}; channeltemplate_id: {channeltemplate_id}")
            self.channel_relations[channel_id] = channeltemplate_id
            stored = self.implementer.store_credentials(owner_id, client_app_id, channeltemplate_id,
                                                        new_credentials)
            if stored:
                self.db.set_credentials(new_credentials, client_app_id, owner_id, channel_id)
                updated_credentials.append(key)
            else:
                logger.verbose(f'[update_credentials] Ignoring key {key}')
                error_keys.append(key)
        else:
            logger.verbose(f'[update_credentials] Ignoring key {key}')
            error_keys.append(key)
    return list(set(updated_credentials)), list(set(error_keys))
def receive_token(self, request):
    """Webhook endpoint: validate the confirmation hash and accept a token.

    Returns 422 for non-JSON bodies, 403 for a bad hash; otherwise
    delegates to handle_receive_token with the client/owner headers.
    """
    logger.debug(
        "\n\n\n\n\n\t\t\t\t\t********************** RECEIVE_TOKEN **************************"
    )
    logger.debug("Received {} - {}".format(request.method, request.path))
    logger.verbose("headers: {}".format(request.headers))
    try:
        # Strip the "Bearer " scheme prefix from the Authorization header.
        received_hash = request.headers.get("Authorization").replace(
            "Bearer ", "")
        if self._validate_confirmation_hash(received_hash):
            if request.is_json:
                received_data = request.get_json()
                logger.debug(f'Authorize response data: {received_data}')
            else:
                return Response(status=422)
            return self.handle_receive_token(
                received_data, request.headers["X-Client-Id"],
                request.headers["X-Owner-Id"])
        else:
            logger.debug("Provided invalid confirmation hash!")
            return Response(status=403)
    except Exception:
        # NOTE(review): falls through returning None (Flask will error) —
        # presumably the framework converts this to a 500; confirm.
        logger.error("Couldn't complete processing request, {}".format(
            traceback.format_exc(limit=5)))
def inbox(self, request):
    """Webhook inbox endpoint: log the incoming request, then delegate."""
    logger.debug(
        "\n\n\n\n\n\t\t\t\t\t*******************INBOX****************************"
    )
    logger.info("Received {} - {}".format(request.method, request.path))
    logger.info("\n{}".format(request.headers))
    # Pretty-print JSON bodies; dump anything else as raw text.
    if request.is_json:
        body_repr = format_str(request.get_json(), is_json=True)
    else:
        body_repr = "\n{}".format(request.get_data(as_text=True))
    logger.info(body_repr)
    return self.handle_request(request)
def send_notification(self):
    """Watchdog loop: ping systemd's WATCHDOG while the service is healthy.

    Every ``interval - 1`` seconds checks the main thread and the local
    HTTP endpoint; only when both look healthy is WATCHDOG=1 notified, so
    systemd restarts the unit if the service wedges.
    """
    try:
        # Imported lazily so the watchdog degrades gracefully off-systemd.
        from systemd.daemon import notify
        event = threading.Event()
        # send first notification on init
        logger.debug('[Watchdog]... everything is ok')
        notify('WATCHDOG=1')
        # event is never set, so wait() acts as an interruptible sleep.
        while not event.wait(self.interval - 1):
            main_thread_alive = threading.main_thread().is_alive()
            logger.debug(
                '[Watchdog] is alive {}'.format(main_thread_alive))
            if main_thread_alive:
                logger.debug('[Watchdog]...')
                # Self-probe the service's own HTTP bind address.
                url = settings.config_http['bind']
                resp = requests.get(url)
                if resp.status_code == 200:
                    logger.debug('[Watchdog] everything is ok')
                    notify('WATCHDOG=1')
                else:
                    logger.warning(
                        f'[Watchdog] Watchdog not sent. Response status: {resp.status_code}; '
                        f'Response: {resp.__dict__}')
            else:
                logger.critical(f'[Watchdog] Main thread is not alive.')
    except (KeyError, TypeError, ValueError):
        logger.info('[Watchdog] not enabled, keep_alive missing')
    except ImportError:
        logger.warn('[Watchdog] systemd not imported {}'.format(
            traceback.format_exc(limit=5)))
    # NOTE(review): bare except also swallows SystemExit/KeyboardInterrupt.
    except:
        logger.alert('[Watchdog] Unexpected exception {}'.format(
            traceback.format_exc(limit=5)))
def read_wbs(tcs_path):
    """Read test cases from several workbooks.

    :param tcs_path: list whose elements are 1-tuples ``(workbook,)`` or
        2-tuples ``(workbook, sheet)``.
    :return: list of test-case dicts, one per test case.
    """
    logger.debug('tcs_path: '+str(tcs_path))
    tcs = []
    for i in tcs_path:
        # Idiom fix: membership test instead of a chained != comparison.
        # Malformed entries are skipped (logged) rather than crashing.
        if len(i) not in (1, 2):
            logger.warning('序列的长度错误: ' + str(i))
            continue
        tcs.extend(read_wb(i[0], None if len(i) == 1 else i[1]))
    return tcs
def read_wb(workbook, sheet=None):
    """Read test cases from one workbook.

    :param workbook: path of the workbook file (str).
    :param sheet: sheet name (str); when None every sheet is read.
    :return: list of test-case dicts, one per test case.
    """
    logger.debug('workbook: '+workbook)
    wb = load_workbook(workbook)
    # A single named sheet, or every sheet in the workbook.
    sheet_names = [sheet] if sheet is not None else wb.sheetnames
    tcs = []
    for name in sheet_names:
        tcs.extend(read_sheet(wb, name))
    return tcs
def get_application(self):
    """GET the application data from the platform webhook.

    :return: the decoded JSON payload on HTTP 200.
    :raises Exception: re-raised after logging on any failure.
    """
    try:
        logger.debug(f"[get_application] Trying to get application data - {settings.webhook_url}")
        resp = requests.get(settings.webhook_url, headers=self.session.headers)
        logger.verbose("[get_application] Received response code[{}]".format(resp.status_code))
        if int(resp.status_code) != 200:
            raise Exception('[get_application] Error getting application!')
        logger.notice("[get_application] Get application successful!")
        return resp.json()
    except Exception:
        logger.alert("Failed while get application! {}".format(traceback.format_exc(limit=5)))
        raise
def send_request(self, conf_data, channel_id):
    """Poll the upstream service for a channel using any valid credentials.

    Iterates every credential key stored for the channel, skipping invalid
    channels and expired tokens; fans the configured requests out over the
    request pool and returns the first non-empty response list, or False.
    """
    try:
        # validate if channel exists
        credentials_list = self.db.full_query(
            'credential-owners/*/channels/{}'.format(channel_id))
        logger.info('[Polling] {} results found for channel_id: {}'.format(
            len(credentials_list), channel_id))
        for credential_dict in credentials_list:  # try until we find valid credentials
            cred_key = credential_dict['key']
            credentials = credential_dict['value']
            is_valid = self.validate_channel(cred_key)
            if not is_valid:
                logger.debug(
                    '[Polling] Invalid channel {}'.format(cred_key))
                continue
            # Validate if token is valid before the request
            # (expiration_date == 0 means the token never expires).
            now = int(time.time())
            token_expiration_date = credentials['expiration_date']
            if now > token_expiration_date and not token_expiration_date == 0:
                logger.debug(
                    "[Polling] access token expired {} - now:{}, expiration:{}"
                    .format(cred_key, now, token_expiration_date))
                continue
            resp_list = []
            # One request per conf_data entry, run in parallel with the
            # same credentials/channel.
            results = self.pool_requests.starmap(
                self.get_response,
                zip(conf_data, repeat(credentials), repeat(channel_id),
                    repeat(cred_key)))
            resp_list.extend([result for result in results if result])
            if resp_list:
                return resp_list
    except requests.exceptions.RequestException as e:
        logger.error('Request Error on polling.send_request {}'.format(e))
        return False
    except Exception:
        logger.error(
            f'[Polling] Unknown error on polling.send_request {traceback.format_exc(limit=5)}'
        )
    logger.notice(
        '[Polling] No valid credentials found for channel {}'.format(
            channel_id))
    return False
def quote_checkout(self, request):
    """Validate a quote-checkout request and run the implementer's checkout.

    Maps domain exceptions to HTTP statuses: validation -> 412,
    authorization -> 403, anything else -> 500.
    """
    logger.debug("\n\n\n\n\n\t\t\t\t\t*******************QUOTE_CHECKOUT****************************")

    def _json_response(status, payload):
        return Response(status=status, response=json.dumps(payload),
                        mimetype="application/json")

    try:
        service_id, quote_id = self._basic_quote_validation(request)
        result = self.implementer.quote_checkout(service_id, quote_id)
        return _json_response(200, result)
    except (ValidationException, InvalidRequestException) as e:
        return _json_response(412, {'text': str(e), 'code': 0})
    except UnauthorizedException as e:
        return _json_response(403, {'text': str(e), 'code': 0})
    except Exception:
        logger.error("[quote_checkout] Couldn't complete processing request, {}".format(traceback.format_exc(limit=5)))
        return _json_response(500, {'text': "Error processing request", 'code': 0})
def start_refresher():
    """Refreshes the access token 2 days before expiry"""
    logger.debug('Starting token refresh thread ...')
    try:
        expiry_t = parser.parse(settings.block['expires'])
        # NOTE(review): tzname() of a parsed datetime may be an offset label
        # that gettz cannot resolve (returning None => local time) — confirm
        # the platform always sends a resolvable zone name.
        current_t = datetime.now(tz.gettz(expiry_t.tzname()))
        time_diff = (expiry_t - current_t).total_seconds()
        # Fire 2 days (2 * 86400 s) before the token expires; a negative
        # delay makes threading.Timer fire immediately.
        refresh_after = time_diff - 86400 * 2
        timer = threading.Timer(refresh_after, renew_token)
        timer.daemon = True
        timer.start()
    except Exception:
        logger.critical("Token expiry check - thread failed {}".format(
            traceback.format_exc(limit=5)))
        # Without a working refresher the manager would die later anyway.
        os._exit(1)
def route_setup(self, app):
    """Register this manager's HTTP routes on the Flask app."""
    logger.debug("App {}".format(app))
    super().route_setup(app)
    version = settings.api_version
    app.add_url_rule('/', view_func=self.starter, methods=['GET'])
    app.add_url_rule(f"/{version}/receive-token",
                     view_func=self.receive_token, methods=['POST'])
    # One authorize route per configured service.
    for service in settings.services:
        app.add_url_rule(f"/{version}/services/{service['id']}/authorize",
                         view_func=self.service_authorize,
                         methods=['GET', 'POST'])
    app.add_url_rule(f"/{version}/users/activate",
                     view_func=self.activate, methods=['POST'])
    app.add_url_rule(f"/{version}/inbox",
                     view_func=self.inbox, methods=['POST'])
    for endpoint, view in (("quote-simulate", self.quote_simulate),
                           ("quote-setup", self.quote_setup),
                           ("quote-checkout", self.quote_checkout)):
        app.add_url_rule(f"/{version}/{endpoint}", view_func=view,
                         methods=['POST'])
    # Response logging hook runs after every request of this app.
    app.after_request_funcs.setdefault(app.name, []).append(self.after)
def get_key(self, key):
    """To get a key"s field from hash table.

    Looks *key* up in the Redis hash named by ``settings.redis_db`` and
    tries to revive the stored string: first as a Python literal, then as
    JSON, finally falling back to the raw string. Returns None implicitly
    when the key is missing or on error.
    """
    try:
        if self.hexists(settings.redis_db, key):
            value = self.hget(settings.redis_db, key)
            logger.debug(
                "[DB] Key {} retrieved from database.".format(key))
            # Deserialization ladder: literal_eval -> json -> raw string.
            try:
                evaluated_value = ast.literal_eval(value)
            except Exception:
                try:
                    evaluated_value = json.loads(value)
                except Exception:
                    evaluated_value = value
            return evaluated_value
        else:
            logger.info("[DB] Key {} not found in database.".format(key))
    except Exception as e:
        logger.error("[DB] get_key error, {}".format(e))
def handle(self, environ, start_response):
    """WSGI-websocket entry point: run one player's receive/dispatch loop.

    Rejects late joiners by redirecting them to a fresh overseer; otherwise
    reads JSON lines from the socket, delegates each parsed message, and
    cleans up the player on disconnect.
    """
    logger.info('Using overseer %s' % id(self))
    socket = environ["wsgi.websocket"]
    logger.debug(socket.__dict__)
    player = Player(socket, start_response)
    enter_teh_infiniteh_loopah = True
    if self.game_started:
        # Game in progress: spin up a new overseer and hand the client over.
        logger.info(
            '%s tried to connect, but game has already started' % player.id
        )
        logger.info('Delegating %s to new overseer...' % player.id)
        self.lcg.new_overseer()
        return self.lcg.redirect_to_overseer(environ, start_response)
    logger.info('%s connected' % player.id)
    while enter_teh_infiniteh_loopah:
        try:
            line = socket.receive()
        except socketerror:
            break
        # An empty/blank line (before or after strip) means disconnect.
        if not line:
            break
        line = line.strip()
        if not line:
            break
        logger.debug('%s -> %s' % (player.id, line))
        try:
            parsed = json.loads(line)
        except ValueError:
            # Malformed JSON: tell the player off but keep the session.
            player.exception('What the hell are you sending to me?')
            continue
        try:
            self.delegate(player, parsed)
        except (GameError, ServerError) as e:
            # Game-level errors are reported to the player, not fatal.
            player.exception(e)
            logger.info('%s raised %s' % (player.id, e))
            continue
    self.remove_player(player)
    try:
        socket.close()
    except socketerror:
        # whatever, I no more care about this socket
        pass
    logger.info('%s disconnected' % player.id)
def diff_quarters(self, _quarter_this, _quarter_last):
    """Fetch both quarters' DB rows, compare them and compute the indicators."""
    # Raw figures for the current and the previous period.
    ret_this, ret_last = self.get_quarter_info(_quarter_this), self.get_quarter_info(_quarter_last)
    logger.debug("本期: \n{}\n".format(pprint.pformat(ret_this)))
    logger.debug("上期: \n{}\n".format(pprint.pformat(ret_last)))
    if not ret_this or not ret_last:
        return
    # Revenue threshold, computed from the raw (unscaled) values.
    operatingrevenue_this, operatingrevenue_last = ret_this.get("OperatingRevenue"), ret_last.get("OperatingRevenue")
    try:
        r_threshold = (operatingrevenue_this - operatingrevenue_last) / operatingrevenue_last
    except decimal.DivisionByZero:
        logger.warning("计算除 0 ")
        return
    logger.debug("营业额同比计算值: {}".format(r_threshold))
    # Trigger threshold: year-on-year net-profit change from raw values.
    netprofit_this, netprofit_last = ret_this.get("NPParentCompanyOwners"), ret_last.get("NPParentCompanyOwners")
    try:
        threshold = (netprofit_this - netprofit_last) / netprofit_last
    except decimal.DivisionByZero:
        logger.warning("计算除 0")
        return
    logger.debug("归属于母公司净利润同比计算值: {}".format(threshold))
    # Indicator dispatch on the sign/magnitude of the profit change.
    if netprofit_this > 0 and netprofit_last > 0:
        if threshold >= 0.5:
            # Both periods profitable, growth >= 50% => large profit increase.
            self.inc_50(ret_this, ret_last, threshold, r_threshold)
        elif 0 < threshold < 0.5:
            # Both profitable, growth below 50% => profit increase.
            self.inc(ret_this, ret_last, threshold, r_threshold)
        elif threshold < 0:
            # Both profitable, profit shrank => profit decrease.
            self.reduce(ret_this, ret_last, threshold, r_threshold)
    elif netprofit_this < 0 and netprofit_last > 0:
        # Profitable last period, loss this period => profit-to-loss.
        self.gain_to_loss(ret_this, ret_last, threshold, r_threshold)
    elif netprofit_this > 0 and netprofit_last < 0:
        # Loss last period, profitable this period => loss-to-profit.
        self.loss_to_gain(ret_this, ret_last, threshold, r_threshold)
    elif netprofit_this < 0 and netprofit_last < 0 and abs(netprofit_this) < abs(netprofit_last):
        # Both losses, loss narrowed => loss reduction.
        self.ease_loss(ret_this, ret_last, threshold, r_threshold)
    elif netprofit_this < 0 and netprofit_last < 0 and abs(netprofit_this) > abs(netprofit_last):
        if threshold > 0.5:
            # Both losses, loss widened by more than 50%.
            self.intensify_loss_50(ret_this, ret_last, threshold, r_threshold)
        else:
            # Both losses, loss widened by 50% or less.
            self.intensify_loss(ret_this, ret_last, threshold, r_threshold)
def set_key(self, key, value):
    """Store a key/value pair in the Redis hash named by settings.redis_db.

    Dicts are JSON-serialized before storage; other values are stored as-is.

    :param key: field name within the hash.
    :param value: field content; a dict is serialized with json.dumps.
    :return: True on success, False when the write failed (error logged).
    """
    try:
        # isinstance instead of `type(value) is dict`: idiomatic and also
        # accepts dict subclasses (e.g. OrderedDict).
        if isinstance(value, dict):
            value = json.dumps(value)
        self.hset(settings.redis_db, key, value)
        logger.debug("[DB] Key {} added/updated in database".format(key))
        return True
    except Exception:
        logger.error("[DB] Failed to set the key at hash. {}".format(
            traceback.format_exc(limit=5)))
        return False
def patch_endpoints(self):
    """PATCH every configured service's activation URI, then custom endpoints.

    A failure on any single service is fatal (os._exit); after all services
    succeed, custom endpoints are patched and the confirmation hash is set.
    """
    try:
        _data = settings.services
        for _service in _data:
            try:
                # Per-service patching can be disabled via config_boot.
                if settings.config_boot.get('patch_services', True) is True:
                    data = {
                        'activation_uri': '{}://{}/{}/services/{}/authorize'.format(
                            settings.schema_pub, settings.host_pub,
                            settings.api_version, _service['id'])
                    }
                    logger.debug("[patch_endpoints] Initiated PATCH - {}".format(_service.get('url')))
                    logger.verbose("\n{}\n".format(json.dumps(data, indent=4, sort_keys=True)))
                    resp = requests.patch('{}/services/{}'.format(settings.api_server_full, _service['id']),
                                          data=json.dumps(data),
                                          headers=self.session.headers)
                    logger.verbose("[patch_endpoints] Received response code[{}]".format(resp.status_code))
                    logger.verbose("\n{}\n".format(json.dumps(resp.json(), indent=4, sort_keys=True)))
                    if int(resp.status_code) == 200:
                        logger.notice("[patch_endpoints] Service setup successful!")
                    else:
                        raise Exception('Service setup not successful!')
            except Exception as e:
                logger.alert("[patch_endpoints] Failed to set service!\n{}".format(e))
                # Hard exit: the manager is unusable without its services.
                os._exit(1)
        self.patch_custom_endpoints()
        self.set_confirmation_hash()
    except Exception:
        logger.alert("[patch_endpoints] Failed at patch endpoints! {}".format(traceback.format_exc(limit=5)))
        raise
def after(self, response):
    """Flask after-request hook: log the outgoing response.

    Logging must never break the response, so failures are swallowed after
    being logged.

    :param response: the Flask response object, returned unchanged.
    """
    try:
        if 'Location' in response.headers:
            logger.debug('Redirect {} code[{}]'.format(
                response.headers['Location'], response.status))
        else:
            logger.debug('Responding with status code[{}]'.format(
                response.status))
        if response.mimetype == 'application/json':
            logger.verbose('\n{}\n'.format(
                json.dumps(json.loads(response.response[0]),
                           indent=4,
                           sort_keys=True)))
    # Fixed: bare `except:` also swallowed SystemExit/KeyboardInterrupt;
    # narrowed to Exception so process-control signals still propagate.
    except Exception:
        logger.error('Post request logging failed!')
    return response
def _cmd_exists_handler(self):
    """Handle EXISTS: report whether *key* is present."""
    target_key = self.parse_string_val()
    code = self._memory.exists(target_key)
    logger.debug("Exists: %s => %s" % (target_key, code))
    return code, None
def _cmd_get_handler(self):
    """Handle GET: fetch the value stored at *key*."""
    target_key = self.parse_string_val()
    code, value = self._memory.get(target_key)
    logger.debug("Get: %s => %s, %s" % (target_key, value, code))
    return code, value
def _cmd_hvals_handler(self):
    """Handle HVALS: return all values of hash *key*."""
    hash_key = self.parse_string_val()
    code, values = self._memory.hvals(hash_key)
    logger.debug("HVALS: %s => %s, %s" % (hash_key, code, values))
    return code, values
def _cmd_hdel_handler(self):
    """Handle HDEL: delete the given fields from hash *key*."""
    hash_key = self.parse_string_val()
    field_list = loads(self.parse_string_val())
    code, result = self._memory.hdel(hash_key, field_list)
    logger.debug("HDel: %s => %s, %s" % (hash_key, code, result))
    return code, result
def _cmd_delete_handler(self):
    """Handle DELETE: remove *key* and return the delete result."""
    target_key = self.parse_string_val()
    result = self._memory.delete(target_key)
    logger.debug("Delete: %s => %s" % (target_key, result))
    return result, None
def _cmd_hlen_handler(self):
    """Handle HLEN: return the number of fields in hash *key*."""
    hash_key = self.parse_string_val()
    code, length = self._memory.hlen(hash_key)
    logger.debug("HLen: %s => %s, %s" % (hash_key, code, length))
    return code, length
def _cmd_ttl_handler(self):
    """Handle TTL: return the remaining time-to-live of *key*."""
    target_key = self.parse_string_val()
    code, remaining = self._memory.ttl(target_key)
    logger.debug("TTL: %s => %s, %s" % (target_key, code, remaining))
    return code, remaining
def _cmd_save_handler(self):
    """Handle SAVE: persist the in-memory database to disk."""
    self._memory.dump_db()
    # Fixed typo in log message: "Successs" -> "Success".
    logger.debug("Save: Success")
    return 1, None
def _cmd_hgetall_handler(self):
    """Handle HGETALL: return every field/value of hash *key* (serialized)."""
    hash_key = self.parse_string_val()
    code, mapping = self._memory.hgetall(hash_key)
    logger.debug("HGetall: %s => %s" % (hash_key, code))
    return code, dumps(mapping)